code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def tornadopath2openapi(urlspec, method):
    """Convert Tornado URLSpec to OpenAPI-compliant path.

    :param urlspec:
    :type urlspec: URLSpec
    :param method: Handler http method
    :type method: function
    """
    if sys.version_info >= (3, 3):
        # Python 3.3+: use inspect.signature; drop the leading `self`.
        args = list(inspect.signature(method).parameters.keys())[1:]
    else:
        # Python 2: unwrap Tornado coroutines so getargspec inspects the
        # real handler function, not the coroutine wrapper.
        if getattr(method, '__tornado_coroutine__', False):
            method = method.__wrapped__
        args = inspect.getargspec(method).args[1:]
    # Each positional handler argument becomes an OpenAPI path placeholder,
    # e.g. `item_id` -> `{item_id}`.
    params = tuple('{{{}}}'.format(arg) for arg in args)
    try:
        path_tpl = urlspec.matcher._path
    except AttributeError:  # tornado<4.5
        path_tpl = urlspec._path
    # The Tornado path template contains %s-style slots for captured groups.
    path = (path_tpl % params)
    if path.count('/') > 1:
        # Strip trailing separators/wildcards, but leave the root "/" alone.
        path = path.rstrip('/?*')
    return path
Convert Tornado URLSpec to OpenAPI-compliant path. :param urlspec: :type urlspec: URLSpec :param method: Handler http method :type method: function
Below is the the instruction that describes the task: ### Input: Convert Tornado URLSpec to OpenAPI-compliant path. :param urlspec: :type urlspec: URLSpec :param method: Handler http method :type method: function ### Response: def tornadopath2openapi(urlspec, method): """Convert Tornado URLSpec to OpenAPI-compliant path. :param urlspec: :type urlspec: URLSpec :param method: Handler http method :type method: function """ if sys.version_info >= (3, 3): args = list(inspect.signature(method).parameters.keys())[1:] else: if getattr(method, '__tornado_coroutine__', False): method = method.__wrapped__ args = inspect.getargspec(method).args[1:] params = tuple('{{{}}}'.format(arg) for arg in args) try: path_tpl = urlspec.matcher._path except AttributeError: # tornado<4.5 path_tpl = urlspec._path path = (path_tpl % params) if path.count('/') > 1: path = path.rstrip('/?*') return path
def retrieve_instances(self, query=None):
    """
    Executes either the supplied query or the one set via options
    (or the 'query' property).

    :param query: query to execute if not the currently set one
    :type query: str
    :return: the generated data
    :rtype: Instances
    """
    if query is None:
        data = javabridge.call(
            self.jobject, "retrieveInstances", "()Lweka/core/Instances;")
    else:
        # Bug fix: the one-argument Java overload expects the query string
        # to be passed after the signature; the original call omitted it,
        # so the supplied query was silently ignored.
        data = javabridge.call(
            self.jobject, "retrieveInstances",
            "(Ljava/lang/String;)Lweka/core/Instances;", query)
    return Instances(data)
Executes either the supplied query or the one set via options (or the 'query' property). :param query: query to execute if not the currently set one :type query: str :return: the generated dataq :rtype: Instances
Below is the the instruction that describes the task: ### Input: Executes either the supplied query or the one set via options (or the 'query' property). :param query: query to execute if not the currently set one :type query: str :return: the generated dataq :rtype: Instances ### Response: def retrieve_instances(self, query=None): """ Executes either the supplied query or the one set via options (or the 'query' property). :param query: query to execute if not the currently set one :type query: str :return: the generated dataq :rtype: Instances """ if query is None: data = javabridge.call(self.jobject, "retrieveInstances", "()Lweka/core/Instances;") else: data = javabridge.call(self.jobject, "retrieveInstances", "(Ljava/lang/String;)Lweka/core/Instances;") return Instances(data)
def apply(self, root):
    """
    Apply the import (rule) to the specified schema.
    If the schema does not already contain an import for the I{namespace}
    specified here, it is added.
    @param root: A schema root.
    @type root: L{Element}
    """
    # Skip roots the filter rejects, or roots that already import the
    # namespace (short-circuit keeps the original evaluation order).
    if not self.filter.match(root, self.ns) or self.exists(root):
        return
    imp = Element('import', ns=self.xsdns)
    imp.set('namespace', self.ns)
    if self.location is not None:
        imp.set('schemaLocation', self.location)
    log.debug('inserting: %s', imp)
    root.insert(imp)
Apply the import (rule) to the specified schema. If the schema does not already contain an import for the I{namespace} specified here, it is added. @param root: A schema root. @type root: L{Element}
Below is the the instruction that describes the task: ### Input: Apply the import (rule) to the specified schema. If the schema does not already contain an import for the I{namespace} specified here, it is added. @param root: A schema root. @type root: L{Element} ### Response: def apply(self, root): """ Apply the import (rule) to the specified schema. If the schema does not already contain an import for the I{namespace} specified here, it is added. @param root: A schema root. @type root: L{Element} """ if not self.filter.match(root, self.ns): return if self.exists(root): return node = Element('import', ns=self.xsdns) node.set('namespace', self.ns) if self.location is not None: node.set('schemaLocation', self.location) log.debug('inserting: %s', node) root.insert(node)
def add_rule(self, config_name, value, plugins, destination):
    """
    Adds a rule. Use add_action_rule or add_match_rule instead.

    :param config_name: config_name of the plugin to add
    :param value: configuration information for the rule
    :param plugins: mapping of all available plugins by config_name
    :param destination: list to append plugin to (self.action_rules or self.match_rules)
    :raises IndexError: if no plugin with ``config_name`` exists
    :return:
    """
    if config_name in plugins:
        rule = plugins[config_name](value)
        destination.append(rule)
    else:
        # Build the message once so the log entry and the raised
        # exception can never drift out of sync.
        message = "Plugin with config_name {0} not found".format(config_name)
        self.logger.error(message)
        raise IndexError(message)
Adds a rule. Use add_action_rule or add_match_rule instead :param rule_wrapper: Rule wrapper class (ActionRule or MatchRule) :param config_name: config_name of the plugin to add :param value: configuration information for the rule :param plugins: list of all available plugins :param destination: list to append plugin to (self.action_rules or self.match_rules) :return:
Below is the the instruction that describes the task: ### Input: Adds a rule. Use add_action_rule or add_match_rule instead :param rule_wrapper: Rule wrapper class (ActionRule or MatchRule) :param config_name: config_name of the plugin to add :param value: configuration information for the rule :param plugins: list of all available plugins :param destination: list to append plugin to (self.action_rules or self.match_rules) :return: ### Response: def add_rule(self, config_name, value, plugins, destination): """ Adds a rule. Use add_action_rule or add_match_rule instead :param rule_wrapper: Rule wrapper class (ActionRule or MatchRule) :param config_name: config_name of the plugin to add :param value: configuration information for the rule :param plugins: list of all available plugins :param destination: list to append plugin to (self.action_rules or self.match_rules) :return: """ if config_name in plugins: rule = plugins[config_name](value) destination.append(rule) else: self.logger.error("Plugin with config_name {0} not found".format(config_name)) raise IndexError("Plugin with config_name {0} not found".format(config_name))
def resample_to_num_points(self, num_points):
    """
    Resample the line to a specified number of points.

    :param num_points:
        Integer number of points the resulting line should have.
    :returns:
        A new line with that many points as requested.
    """
    assert len(self.points) > 1, "can not resample the line of one point"
    # Target spacing between consecutive resampled points.
    section_length = self.get_length() / (num_points - 1)
    resampled_points = [self.points[0]]
    # ``segment`` counts original vertices we have advanced past;
    # ``acc_length`` is the cumulative length of fully consumed segments.
    segment = 0
    acc_length = 0
    last_segment_length = 0
    for i in range(num_points - 1):
        # Distance along the line from its start to the i-th new point.
        tot_length = (i + 1) * section_length
        # Walk forward over original segments until we reach the one
        # containing the target distance (or run out of segments).
        while tot_length > acc_length and segment < len(self.points) - 1:
            last_segment_length = self.points[segment].distance(
                self.points[segment + 1]
            )
            acc_length += last_segment_length
            segment += 1
        p1, p2 = self.points[segment - 1:segment + 1]
        # Offset of the target point from the start of the current segment.
        offset = tot_length - (acc_length - last_segment_length)
        if offset < 1e-5:
            # forward geodetic transformations for very small distances
            # are very inefficient (and also unneeded). if target point
            # is just 1 cm away from original (non-resampled) line vertex,
            # don't even bother doing geodetic calculations.
            resampled = p1
        else:
            resampled = p1.equally_spaced_points(p2, offset)[1]
        resampled_points.append(resampled)
    return Line(resampled_points)
Resample the line to a specified number of points. :param num_points: Integer number of points the resulting line should have. :returns: A new line with that many points as requested.
Below is the the instruction that describes the task: ### Input: Resample the line to a specified number of points. :param num_points: Integer number of points the resulting line should have. :returns: A new line with that many points as requested. ### Response: def resample_to_num_points(self, num_points): """ Resample the line to a specified number of points. :param num_points: Integer number of points the resulting line should have. :returns: A new line with that many points as requested. """ assert len(self.points) > 1, "can not resample the line of one point" section_length = self.get_length() / (num_points - 1) resampled_points = [self.points[0]] segment = 0 acc_length = 0 last_segment_length = 0 for i in range(num_points - 1): tot_length = (i + 1) * section_length while tot_length > acc_length and segment < len(self.points) - 1: last_segment_length = self.points[segment].distance( self.points[segment + 1] ) acc_length += last_segment_length segment += 1 p1, p2 = self.points[segment - 1:segment + 1] offset = tot_length - (acc_length - last_segment_length) if offset < 1e-5: # forward geodetic transformations for very small distances # are very inefficient (and also unneeded). if target point # is just 1 cm away from original (non-resampled) line vertex, # don't even bother doing geodetic calculations. resampled = p1 else: resampled = p1.equally_spaced_points(p2, offset)[1] resampled_points.append(resampled) return Line(resampled_points)
def _theorem5p4(adj, ub): """By Theorem 5.4, if any two vertices have ub + 1 common neighbors then we can add an edge between them. """ new_edges = set() for u, v in itertools.combinations(adj, 2): if u in adj[v]: # already an edge continue if len(adj[u].intersection(adj[v])) > ub: new_edges.add((u, v)) while new_edges: for u, v in new_edges: adj[u].add(v) adj[v].add(u) new_edges = set() for u, v in itertools.combinations(adj, 2): if u in adj[v]: continue if len(adj[u].intersection(adj[v])) > ub: new_edges.add((u, v))
By Theorem 5.4, if any two vertices have ub + 1 common neighbors then we can add an edge between them.
Below is the the instruction that describes the task: ### Input: By Theorem 5.4, if any two vertices have ub + 1 common neighbors then we can add an edge between them. ### Response: def _theorem5p4(adj, ub): """By Theorem 5.4, if any two vertices have ub + 1 common neighbors then we can add an edge between them. """ new_edges = set() for u, v in itertools.combinations(adj, 2): if u in adj[v]: # already an edge continue if len(adj[u].intersection(adj[v])) > ub: new_edges.add((u, v)) while new_edges: for u, v in new_edges: adj[u].add(v) adj[v].add(u) new_edges = set() for u, v in itertools.combinations(adj, 2): if u in adj[v]: continue if len(adj[u].intersection(adj[v])) > ub: new_edges.add((u, v))
def remove_tenant_user_role(request, project=None, user=None, role=None,
                            group=None, domain=None):
    """Removes a given single role for a user from a tenant."""
    manager = keystoneclient(request, admin=True).roles
    # Keystone v3 uses the generic revoke API; older versions have a
    # dedicated remove_user_role call.
    if VERSIONS.active >= 3:
        return manager.revoke(role, user=user, project=project,
                              group=group, domain=domain)
    return manager.remove_user_role(user, role, project)
Removes a given single role for a user from a tenant.
Below is the the instruction that describes the task: ### Input: Removes a given single role for a user from a tenant. ### Response: def remove_tenant_user_role(request, project=None, user=None, role=None, group=None, domain=None): """Removes a given single role for a user from a tenant.""" manager = keystoneclient(request, admin=True).roles if VERSIONS.active < 3: return manager.remove_user_role(user, role, project) else: return manager.revoke(role, user=user, project=project, group=group, domain=domain)
def has_changed (filename):
    """Check if filename has changed since the last check. If this
    is the first check, assume the file is changed."""
    # Normalize to an absolute path so different spellings of the same
    # file share one cache entry.
    key = os.path.abspath(filename)
    mtime = get_mtime(key)
    if key not in _mtime_cache:
        # First observation: remember the mtime and report "changed".
        _mtime_cache[key] = mtime
        return True
    # NOTE(review): the cached mtime is never refreshed after the first
    # insert, so this effectively reports "changed since the *first*
    # check", not since the previous one — confirm that is intended.
    return mtime > _mtime_cache[key]
Check if filename has changed since the last check. If this is the first check, assume the file is changed.
Below is the the instruction that describes the task: ### Input: Check if filename has changed since the last check. If this is the first check, assume the file is changed. ### Response: def has_changed (filename): """Check if filename has changed since the last check. If this is the first check, assume the file is changed.""" key = os.path.abspath(filename) mtime = get_mtime(key) if key not in _mtime_cache: _mtime_cache[key] = mtime return True return mtime > _mtime_cache[key]
def showFileHeaderData(peInstance):
    """
    Prints IMAGE_FILE_HEADER fields.

    :param peInstance: parsed PE instance; each field of
        ``peInstance.ntHeaders.fileHeader`` is printed as
        ``--> name = 0xvalue``.
    """
    fileHeaderFields = peInstance.ntHeaders.fileHeader.getFields()
    # Parenthesized single-argument print works under both Python 2
    # (as a statement with a parenthesized expression) and Python 3,
    # unlike the original bare print statements.
    print("[+] IMAGE_FILE_HEADER values:\n")
    for field in fileHeaderFields:
        print("--> %s = 0x%08x" % (field, fileHeaderFields[field].value))
Prints IMAGE_FILE_HEADER fields.
Below is the the instruction that describes the task: ### Input: Prints IMAGE_FILE_HEADER fields. ### Response: def showFileHeaderData(peInstance): """ Prints IMAGE_FILE_HEADER fields. """ fileHeaderFields = peInstance.ntHeaders.fileHeader.getFields() print "[+] IMAGE_FILE_HEADER values:\n" for field in fileHeaderFields: print "--> %s = 0x%08x" % (field, fileHeaderFields[field].value)
def getAllChildNodes(self):
    '''
        getAllChildNodes - Gets all the children, and their children,
           and their children, and so on, all the way to the end,
           as a TagCollection.

           Use .childNodes for a regular list

        @return TagCollection<AdvancedTag> - A TagCollection of all children (and their children recursive)
    '''
    collected = TagCollection()
    for directChild in self.children:
        # The direct child itself...
        collected.append(directChild)
        # ...followed by its entire subtree, depth-first.
        collected += directChild.getAllChildNodes()
    return collected
getAllChildNodes - Gets all the children, and their children, and their children, and so on, all the way to the end as a TagCollection. Use .childNodes for a regular list @return TagCollection<AdvancedTag> - A TagCollection of all children (and their children recursive)
Below is the the instruction that describes the task: ### Input: getAllChildNodes - Gets all the children, and their children, and their children, and so on, all the way to the end as a TagCollection. Use .childNodes for a regular list @return TagCollection<AdvancedTag> - A TagCollection of all children (and their children recursive) ### Response: def getAllChildNodes(self): ''' getAllChildNodes - Gets all the children, and their children, and their children, and so on, all the way to the end as a TagCollection. Use .childNodes for a regular list @return TagCollection<AdvancedTag> - A TagCollection of all children (and their children recursive) ''' ret = TagCollection() # Scan all the children of this node for child in self.children: # Append each child ret.append(child) # Append children's children recursive ret += child.getAllChildNodes() return ret
def get_temp_dir(keep=False):
    """Creates a temporary directory, and returns its name.

    :param keep: If False (default), the directory will be recursively
        deleted when Scapy exits.
    :return: A full path to a temporary directory.
    """
    dname = tempfile.mkdtemp(prefix="scapy")
    if not keep:
        # Registering the path lets Scapy's exit hook remove it later.
        conf.temp_files.append(dname)
    return dname
Creates a temporary file, and returns its name. :param keep: If False (default), the directory will be recursively deleted when Scapy exits. :return: A full path to a temporary directory.
Below is the the instruction that describes the task: ### Input: Creates a temporary file, and returns its name. :param keep: If False (default), the directory will be recursively deleted when Scapy exits. :return: A full path to a temporary directory. ### Response: def get_temp_dir(keep=False): """Creates a temporary file, and returns its name. :param keep: If False (default), the directory will be recursively deleted when Scapy exits. :return: A full path to a temporary directory. """ dname = tempfile.mkdtemp(prefix="scapy") if not keep: conf.temp_files.append(dname) return dname
def _compute_prediction(self): """Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers. Usage: Do not call this method directly. Use the prediction and/or prediction_weight properties instead. Arguments: None Return: None """ total_weight = 0 total_prediction = 0 for rule in self._rules.values(): total_weight += rule.prediction_weight total_prediction += (rule.prediction * rule.prediction_weight) self._prediction = total_prediction / (total_weight or 1) self._prediction_weight = total_weight
Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers. Usage: Do not call this method directly. Use the prediction and/or prediction_weight properties instead. Arguments: None Return: None
Below is the the instruction that describes the task: ### Input: Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers. Usage: Do not call this method directly. Use the prediction and/or prediction_weight properties instead. Arguments: None Return: None ### Response: def _compute_prediction(self): """Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers. Usage: Do not call this method directly. Use the prediction and/or prediction_weight properties instead. Arguments: None Return: None """ total_weight = 0 total_prediction = 0 for rule in self._rules.values(): total_weight += rule.prediction_weight total_prediction += (rule.prediction * rule.prediction_weight) self._prediction = total_prediction / (total_weight or 1) self._prediction_weight = total_weight
def _bfgs_direction(s, y, x, hessinv_estimate=None):
    r"""Compute ``Hn^-1(x)`` for the L-BFGS method.

    Parameters
    ----------
    s : sequence of `LinearSpaceElement`
        The ``s`` coefficients in the BFGS update, see Notes.
    y : sequence of `LinearSpaceElement`
        The ``y`` coefficients in the BFGS update, see Notes.
    x : `LinearSpaceElement`
        Point in which to evaluate the product.
    hessinv_estimate : `Operator`, optional
        Initial estimate of the hessian ``H0^-1``.

    Returns
    -------
    r : ``x.space`` element
        The result of ``Hn^-1(x)``.

    Notes
    -----
    :math:`H_n^{-1}` is defined recursively as

    .. math::
        H_{n+1}^{-1} =
        \left(I - \frac{ s_n y_n^T}{y_n^T s_n} \right)
        H_{n}^{-1}
        \left(I - \frac{ y_n s_n^T}{y_n^T s_n} \right) +
        \frac{s_n s_n^T}{y_n^T \, s_n}

    With :math:`H_0^{-1}` given by ``hess_estimate``.
    """
    assert len(s) == len(y)
    r = x.copy()
    alphas = np.zeros(len(s))
    rhos = np.zeros(len(s))
    # First (backward) loop of the classic two-loop recursion:
    # accumulate the alpha coefficients while updating r in place.
    for i in reversed(range(len(s))):
        rhos[i] = 1.0 / y[i].inner(s[i])
        alphas[i] = rhos[i] * (s[i].inner(r))
        r.lincomb(1, r, -alphas[i], y[i])
    # Apply the initial Hessian-inverse estimate H0^-1, when given.
    if hessinv_estimate is not None:
        r = hessinv_estimate(r)
    # Second (forward) loop: fold the stored alphas back in.
    for i in range(len(s)):
        beta = rhos[i] * (y[i].inner(r))
        r.lincomb(1, r, alphas[i] - beta, s[i])
    return r
r"""Compute ``Hn^-1(x)`` for the L-BFGS method. Parameters ---------- s : sequence of `LinearSpaceElement` The ``s`` coefficients in the BFGS update, see Notes. y : sequence of `LinearSpaceElement` The ``y`` coefficients in the BFGS update, see Notes. x : `LinearSpaceElement` Point in which to evaluate the product. hessinv_estimate : `Operator`, optional Initial estimate of the hessian ``H0^-1``. Returns ------- r : ``x.space`` element The result of ``Hn^-1(x)``. Notes ----- :math:`H_n^{-1}` is defined recursively as .. math:: H_{n+1}^{-1} = \left(I - \frac{ s_n y_n^T}{y_n^T s_n} \right) H_{n}^{-1} \left(I - \frac{ y_n s_n^T}{y_n^T s_n} \right) + \frac{s_n s_n^T}{y_n^T \, s_n} With :math:`H_0^{-1}` given by ``hess_estimate``.
Below is the the instruction that describes the task: ### Input: r"""Compute ``Hn^-1(x)`` for the L-BFGS method. Parameters ---------- s : sequence of `LinearSpaceElement` The ``s`` coefficients in the BFGS update, see Notes. y : sequence of `LinearSpaceElement` The ``y`` coefficients in the BFGS update, see Notes. x : `LinearSpaceElement` Point in which to evaluate the product. hessinv_estimate : `Operator`, optional Initial estimate of the hessian ``H0^-1``. Returns ------- r : ``x.space`` element The result of ``Hn^-1(x)``. Notes ----- :math:`H_n^{-1}` is defined recursively as .. math:: H_{n+1}^{-1} = \left(I - \frac{ s_n y_n^T}{y_n^T s_n} \right) H_{n}^{-1} \left(I - \frac{ y_n s_n^T}{y_n^T s_n} \right) + \frac{s_n s_n^T}{y_n^T \, s_n} With :math:`H_0^{-1}` given by ``hess_estimate``. ### Response: def _bfgs_direction(s, y, x, hessinv_estimate=None): r"""Compute ``Hn^-1(x)`` for the L-BFGS method. Parameters ---------- s : sequence of `LinearSpaceElement` The ``s`` coefficients in the BFGS update, see Notes. y : sequence of `LinearSpaceElement` The ``y`` coefficients in the BFGS update, see Notes. x : `LinearSpaceElement` Point in which to evaluate the product. hessinv_estimate : `Operator`, optional Initial estimate of the hessian ``H0^-1``. Returns ------- r : ``x.space`` element The result of ``Hn^-1(x)``. Notes ----- :math:`H_n^{-1}` is defined recursively as .. math:: H_{n+1}^{-1} = \left(I - \frac{ s_n y_n^T}{y_n^T s_n} \right) H_{n}^{-1} \left(I - \frac{ y_n s_n^T}{y_n^T s_n} \right) + \frac{s_n s_n^T}{y_n^T \, s_n} With :math:`H_0^{-1}` given by ``hess_estimate``. """ assert len(s) == len(y) r = x.copy() alphas = np.zeros(len(s)) rhos = np.zeros(len(s)) for i in reversed(range(len(s))): rhos[i] = 1.0 / y[i].inner(s[i]) alphas[i] = rhos[i] * (s[i].inner(r)) r.lincomb(1, r, -alphas[i], y[i]) if hessinv_estimate is not None: r = hessinv_estimate(r) for i in range(len(s)): beta = rhos[i] * (y[i].inner(r)) r.lincomb(1, r, alphas[i] - beta, s[i]) return r
def _load_region(self, acct_id, region_name, path): """load config from a single per-region subdirectory of an account""" lim_path = os.path.join(path, 'limit_overrides.json') thresh_path = os.path.join(path, 'threshold_overrides.json') res = {'limit_overrides': {}, 'threshold_overrides': {}} if os.path.exists(lim_path): with open(lim_path, 'r') as fh: res['limit_overrides'] = json.loads(fh.read()) if os.path.exists(thresh_path): with open(thresh_path, 'r') as fh: res['threshold_overrides'] = json.loads(fh.read()) self._config[acct_id]['regions'][region_name] = res
load config from a single per-region subdirectory of an account
Below is the the instruction that describes the task: ### Input: load config from a single per-region subdirectory of an account ### Response: def _load_region(self, acct_id, region_name, path): """load config from a single per-region subdirectory of an account""" lim_path = os.path.join(path, 'limit_overrides.json') thresh_path = os.path.join(path, 'threshold_overrides.json') res = {'limit_overrides': {}, 'threshold_overrides': {}} if os.path.exists(lim_path): with open(lim_path, 'r') as fh: res['limit_overrides'] = json.loads(fh.read()) if os.path.exists(thresh_path): with open(thresh_path, 'r') as fh: res['threshold_overrides'] = json.loads(fh.read()) self._config[acct_id]['regions'][region_name] = res
def run(self):
    """Run the main loop. Returns exit code."""
    # Default to failure; set_exit_code (registered below) overwrites
    # this once the startup tasks complete.
    self.exit_code = 1
    self.mainloop = GLib.MainLoop()
    try:
        future = ensure_future(self._start_async_tasks())
        # Record the startup tasks' result as the process exit code
        # when they finish.
        future.callbacks.append(self.set_exit_code)
        self.mainloop.run()
        return self.exit_code
    except KeyboardInterrupt:
        # Ctrl-C is treated as an error exit.
        return 1
Run the main loop. Returns exit code.
Below is the the instruction that describes the task: ### Input: Run the main loop. Returns exit code. ### Response: def run(self): """Run the main loop. Returns exit code.""" self.exit_code = 1 self.mainloop = GLib.MainLoop() try: future = ensure_future(self._start_async_tasks()) future.callbacks.append(self.set_exit_code) self.mainloop.run() return self.exit_code except KeyboardInterrupt: return 1
def lookup_field(key, lookup_type=None, placeholder=None,
                 html_class="div", select_type="strapselect",
                 mapping="uuid"):
    """Generates a lookup field for form definitions"""
    # Fall back to the key itself as the lookup type, and derive a
    # human-readable placeholder from it.
    effective_type = lookup_type if lookup_type is not None else key
    effective_placeholder = (placeholder if placeholder is not None
                             else "Select a " + effective_type)
    return {
        'key': key,
        'htmlClass': html_class,
        'type': select_type,
        'placeholder': effective_placeholder,
        'options': {
            "type": effective_type,
            "asyncCallback": "$ctrl.getFormData",
            "map": {'valueProperty': mapping, 'nameProperty': 'name'},
        },
    }
Generates a lookup field for form definitions
Below is the the instruction that describes the task: ### Input: Generates a lookup field for form definitions ### Response: def lookup_field(key, lookup_type=None, placeholder=None, html_class="div", select_type="strapselect", mapping="uuid"): """Generates a lookup field for form definitions""" if lookup_type is None: lookup_type = key if placeholder is None: placeholder = "Select a " + lookup_type result = { 'key': key, 'htmlClass': html_class, 'type': select_type, 'placeholder': placeholder, 'options': { "type": lookup_type, "asyncCallback": "$ctrl.getFormData", "map": {'valueProperty': mapping, 'nameProperty': 'name'} } } return result
def backend_widget(backend):
    """Creates a backend widget.
    """
    config = backend.configuration().to_dict()
    props = backend.properties().to_dict()

    # Backend name header.
    name = widgets.HTML(value="<h4>{name}</h4>".format(name=backend.name()),
                        layout=widgets.Layout())

    n_qubits = config['n_qubits']

    qubit_count = widgets.HTML(value="<h5><b>{qubits}</b></h5>".format(qubits=n_qubits),
                               layout=widgets.Layout(justify_content='center'))

    # Fixed-size output area holding the coupling-map figure.
    cmap = widgets.Output(layout=widgets.Layout(min_width='250px', max_width='250px',
                                                max_height='250px', min_height='250px',
                                                justify_content='center',
                                                align_items='center',
                                                margin='0px 0px 0px 0px'))

    with cmap:
        _cmap_fig = plot_gate_map(backend,
                                  plot_directed=False,
                                  label_qubits=False)
        if _cmap_fig is not None:
            display(_cmap_fig)
            # Prevents plot from showing up twice.
            plt.close(_cmap_fig)

    pending = generate_jobs_pending_widget()

    # Placeholder widgets; presumably filled in later by a monitor
    # loop elsewhere — content is not set here.
    is_oper = widgets.HTML(value="<h5></h5>",
                           layout=widgets.Layout(justify_content='center'))

    least_busy = widgets.HTML(value="<h5></h5>",
                              layout=widgets.Layout(justify_content='center'))

    # Average T1 over all qubits; props['qubits'][q][0] is the T1 entry.
    t1_units = props['qubits'][0][0]['unit']
    avg_t1 = round(sum([q[0]['value'] for q in props['qubits']])/n_qubits, 1)
    t1_widget = widgets.HTML(value="<h5>{t1} {units}</h5>".format(t1=avg_t1, units=t1_units),
                             layout=widgets.Layout())

    # Average T2 over all qubits; props['qubits'][q][1] is the T2 entry.
    t2_units = props['qubits'][0][1]['unit']
    avg_t2 = round(sum([q[1]['value'] for q in props['qubits']])/n_qubits, 1)
    t2_widget = widgets.HTML(value="<h5>{t2} {units}</h5>".format(t2=avg_t2, units=t2_units),
                             layout=widgets.Layout())

    out = widgets.VBox([name, cmap, qubit_count, pending,
                        least_busy, is_oper, t1_widget, t2_widget],
                       layout=widgets.Layout(display='inline-flex',
                                             flex_flow='column',
                                             align_items='center'))

    # Flag used by the surrounding dashboard to know the widget is live.
    out._is_alive = True
    return out
Creates a backend widget.
Below is the the instruction that describes the task: ### Input: Creates a backend widget. ### Response: def backend_widget(backend): """Creates a backend widget. """ config = backend.configuration().to_dict() props = backend.properties().to_dict() name = widgets.HTML(value="<h4>{name}</h4>".format(name=backend.name()), layout=widgets.Layout()) n_qubits = config['n_qubits'] qubit_count = widgets.HTML(value="<h5><b>{qubits}</b></h5>".format(qubits=n_qubits), layout=widgets.Layout(justify_content='center')) cmap = widgets.Output(layout=widgets.Layout(min_width='250px', max_width='250px', max_height='250px', min_height='250px', justify_content='center', align_items='center', margin='0px 0px 0px 0px')) with cmap: _cmap_fig = plot_gate_map(backend, plot_directed=False, label_qubits=False) if _cmap_fig is not None: display(_cmap_fig) # Prevents plot from showing up twice. plt.close(_cmap_fig) pending = generate_jobs_pending_widget() is_oper = widgets.HTML(value="<h5></h5>", layout=widgets.Layout(justify_content='center')) least_busy = widgets.HTML(value="<h5></h5>", layout=widgets.Layout(justify_content='center')) t1_units = props['qubits'][0][0]['unit'] avg_t1 = round(sum([q[0]['value'] for q in props['qubits']])/n_qubits, 1) t1_widget = widgets.HTML(value="<h5>{t1} {units}</h5>".format(t1=avg_t1, units=t1_units), layout=widgets.Layout()) t2_units = props['qubits'][0][1]['unit'] avg_t2 = round(sum([q[1]['value'] for q in props['qubits']])/n_qubits, 1) t2_widget = widgets.HTML(value="<h5>{t2} {units}</h5>".format(t2=avg_t2, units=t2_units), layout=widgets.Layout()) out = widgets.VBox([name, cmap, qubit_count, pending, least_busy, is_oper, t1_widget, t2_widget], layout=widgets.Layout(display='inline-flex', flex_flow='column', align_items='center')) out._is_alive = True return out
def hexify(message):
    '''Print out printable characters, but others in hex'''
    import string
    # Whitespace (including space itself) is always escaped; everything
    # else is kept as-is only when printable.
    always_escape = '\n\r \t'
    pieces = [
        ch if (ch not in always_escape) and (ch in string.printable)
        else '\\x%02x' % ord(ch)
        for ch in message
    ]
    return ''.join(pieces)
Print out printable characters, but others in hex
Below is the the instruction that describes the task: ### Input: Print out printable characters, but others in hex ### Response: def hexify(message): '''Print out printable characters, but others in hex''' import string hexified = [] for char in message: if (char in '\n\r \t') or (char not in string.printable): hexified.append('\\x%02x' % ord(char)) else: hexified.append(char) return ''.join(hexified)
def _create_user_agent(self):
    """ Create the user agent and return it as a string. """
    # Base agent: library identity plus the underlying HTTP client's
    # default agent string.
    agent = '{}/{} {}'.format(
        pyspacegdn.__title__, pyspacegdn.__version__, default_user_agent())
    if not self.client_name:
        return agent
    # Prepend the application's own identity when one was configured.
    return '{}/{} {}'.format(self.client_name, self.client_version, agent)
Create the user agent and return it as a string.
Below is the the instruction that describes the task: ### Input: Create the user agent and return it as a string. ### Response: def _create_user_agent(self): """ Create the user agent and return it as a string. """ user_agent = '{}/{} {}'.format(pyspacegdn.__title__, pyspacegdn.__version__, default_user_agent()) if self.client_name: user_agent = '{}/{} {}'.format(self.client_name, self.client_version, user_agent) return user_agent
def _filehandler(configurable):
    """Default logging file handler."""
    # Dotted log names map to nested directories under log_path.
    base_name = configurable.log_name.replace('.', sep)
    log_file = join(configurable.log_path, '{0}.log'.format(base_name))
    # Append mode so restarts do not truncate existing logs.
    return FileHandler(log_file, mode='a+')
Default logging file handler.
Below is the the instruction that describes the task: ### Input: Default logging file handler. ### Response: def _filehandler(configurable): """Default logging file handler.""" filename = configurable.log_name.replace('.', sep) path = join(configurable.log_path, '{0}.log'.format(filename)) return FileHandler(path, mode='a+')
def reload_input_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") reload = ET.Element("reload") config = reload input = ET.SubElement(reload, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def reload_input_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") reload = ET.Element("reload") config = reload input = ET.SubElement(reload, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
def _get_referenced_fields_and_fragment_names( context, # ValidationContext cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]] fragment, # type: InlineFragment ): # type: (...) -> Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]] """Given a reference to a fragment, return the represented collection of fields as well as a list of nested fragment names referenced via fragment spreads.""" # Short-circuit building a type from the AST if possible. cached = cached_fields_and_fragment_names.get(fragment.selection_set) if cached: return cached fragment_type = type_from_ast( # type: ignore context.get_schema(), fragment.type_condition ) return _get_fields_and_fragments_names( # type: ignore context, cached_fields_and_fragment_names, fragment_type, fragment.selection_set )
Given a reference to a fragment, return the represented collection of fields as well as a list of nested fragment names referenced via fragment spreads.
Below is the the instruction that describes the task: ### Input: Given a reference to a fragment, return the represented collection of fields as well as a list of nested fragment names referenced via fragment spreads. ### Response: def _get_referenced_fields_and_fragment_names( context, # ValidationContext cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]] fragment, # type: InlineFragment ): # type: (...) -> Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]] """Given a reference to a fragment, return the represented collection of fields as well as a list of nested fragment names referenced via fragment spreads.""" # Short-circuit building a type from the AST if possible. cached = cached_fields_and_fragment_names.get(fragment.selection_set) if cached: return cached fragment_type = type_from_ast( # type: ignore context.get_schema(), fragment.type_condition ) return _get_fields_and_fragments_names( # type: ignore context, cached_fields_and_fragment_names, fragment_type, fragment.selection_set )
def lmx_relative(): """Language model using relative attention.""" hparams = lmx_base() hparams.self_attention_type = "dot_product_relative_v2" hparams.activation_dtype = "float32" hparams.weight_dtype = "float32" return hparams
Language model using relative attention.
Below is the the instruction that describes the task: ### Input: Language model using relative attention. ### Response: def lmx_relative(): """Language model using relative attention.""" hparams = lmx_base() hparams.self_attention_type = "dot_product_relative_v2" hparams.activation_dtype = "float32" hparams.weight_dtype = "float32" return hparams
def _do_cross_validation(self, clf, data, task): """Run voxelwise cross validation based on correlation vectors. clf: classification function the classifier to be used in cross validation data: 3D numpy array If using sklearn.svm.SVC with precomputed kernel, it is in shape [num_processed_voxels, num_epochs, num_epochs]; otherwise it is the input argument corr, in shape [num_processed_voxels, num_epochs, num_voxels] task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- results: list of tuple (voxel_id, accuracy) the accuracy numbers of all voxels, in accuracy descending order the length of array equals the number of assigned voxels """ time1 = time.time() if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed'\ and self.use_multiprocessing: inlist = [(clf, i + task[0], self.num_folds, data[i, :, :], self.labels) for i in range(task[1])] with multiprocessing.Pool(self.process_num) as pool: results = list(pool.starmap(_cross_validation_for_one_voxel, inlist)) else: results = [] for i in range(task[1]): result = _cross_validation_for_one_voxel(clf, i + task[0], self.num_folds, data[i, :, :], self.labels) results.append(result) time2 = time.time() logger.debug( 'cross validation for %d voxels, takes %.2f s' % (task[1], (time2 - time1)) ) return results
Run voxelwise cross validation based on correlation vectors. clf: classification function the classifier to be used in cross validation data: 3D numpy array If using sklearn.svm.SVC with precomputed kernel, it is in shape [num_processed_voxels, num_epochs, num_epochs]; otherwise it is the input argument corr, in shape [num_processed_voxels, num_epochs, num_voxels] task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- results: list of tuple (voxel_id, accuracy) the accuracy numbers of all voxels, in accuracy descending order the length of array equals the number of assigned voxels
Below is the the instruction that describes the task: ### Input: Run voxelwise cross validation based on correlation vectors. clf: classification function the classifier to be used in cross validation data: 3D numpy array If using sklearn.svm.SVC with precomputed kernel, it is in shape [num_processed_voxels, num_epochs, num_epochs]; otherwise it is the input argument corr, in shape [num_processed_voxels, num_epochs, num_voxels] task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- results: list of tuple (voxel_id, accuracy) the accuracy numbers of all voxels, in accuracy descending order the length of array equals the number of assigned voxels ### Response: def _do_cross_validation(self, clf, data, task): """Run voxelwise cross validation based on correlation vectors. clf: classification function the classifier to be used in cross validation data: 3D numpy array If using sklearn.svm.SVC with precomputed kernel, it is in shape [num_processed_voxels, num_epochs, num_epochs]; otherwise it is the input argument corr, in shape [num_processed_voxels, num_epochs, num_voxels] task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- results: list of tuple (voxel_id, accuracy) the accuracy numbers of all voxels, in accuracy descending order the length of array equals the number of assigned voxels """ time1 = time.time() if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed'\ and self.use_multiprocessing: inlist = [(clf, i + task[0], self.num_folds, data[i, :, :], self.labels) for i in range(task[1])] with multiprocessing.Pool(self.process_num) as pool: results = list(pool.starmap(_cross_validation_for_one_voxel, inlist)) else: results = [] for i in range(task[1]): result = _cross_validation_for_one_voxel(clf, i + task[0], self.num_folds, data[i, :, :], self.labels) results.append(result) time2 = time.time() logger.debug( 'cross validation for %d voxels, takes 
%.2f s' % (task[1], (time2 - time1)) ) return results
def clean_draft_pages_from_space(confluence, space_key, count, date_now): """ Remove draft pages from space using datetime.now :param confluence: :param space_key: :param count: :param date_now: :return: int counter """ pages = confluence.get_all_draft_pages_from_space(space=space_key, start=0, limit=500) for page in pages: page_id = page['id'] draft_page = confluence.get_draft_page_by_id(page_id=page_id) last_date_string = draft_page['version']['when'] last_date = datetime.datetime.strptime(last_date_string.replace(".000", "")[:-6], "%Y-%m-%dT%H:%M:%S") if (date_now - last_date) > datetime.timedelta(days=DRAFT_DAYS): count += 1 print("Removing page with page id: " + page_id) confluence.remove_page_as_draft(page_id=page_id) print("Removed page with date " + last_date_string) return count
Remove draft pages from space using datetime.now :param confluence: :param space_key: :param count: :param date_now: :return: int counter
Below is the the instruction that describes the task: ### Input: Remove draft pages from space using datetime.now :param confluence: :param space_key: :param count: :param date_now: :return: int counter ### Response: def clean_draft_pages_from_space(confluence, space_key, count, date_now): """ Remove draft pages from space using datetime.now :param confluence: :param space_key: :param count: :param date_now: :return: int counter """ pages = confluence.get_all_draft_pages_from_space(space=space_key, start=0, limit=500) for page in pages: page_id = page['id'] draft_page = confluence.get_draft_page_by_id(page_id=page_id) last_date_string = draft_page['version']['when'] last_date = datetime.datetime.strptime(last_date_string.replace(".000", "")[:-6], "%Y-%m-%dT%H:%M:%S") if (date_now - last_date) > datetime.timedelta(days=DRAFT_DAYS): count += 1 print("Removing page with page id: " + page_id) confluence.remove_page_as_draft(page_id=page_id) print("Removed page with date " + last_date_string) return count
def CheckMounts(filename): """Parses the currently mounted devices.""" with io.open(filename, "r") as fd: for line in fd: try: device, mnt_point, fs_type, _ = line.split(" ", 3) except ValueError: continue if fs_type in ACCEPTABLE_FILESYSTEMS: if os.path.exists(device): yield device, fs_type, mnt_point
Parses the currently mounted devices.
Below is the the instruction that describes the task: ### Input: Parses the currently mounted devices. ### Response: def CheckMounts(filename): """Parses the currently mounted devices.""" with io.open(filename, "r") as fd: for line in fd: try: device, mnt_point, fs_type, _ = line.split(" ", 3) except ValueError: continue if fs_type in ACCEPTABLE_FILESYSTEMS: if os.path.exists(device): yield device, fs_type, mnt_point
def generateFormatToExtension(format, fallbackFormat): """ +--------------+--------------------------------------------------------------------+ | mactype1 | Mac Type 1 font (generates suitcase and LWFN file) | +--------------+--------------------------------------------------------------------+ | macttf | Mac TrueType font (generates suitcase) | +--------------+--------------------------------------------------------------------+ | macttdfont | Mac TrueType font (generates suitcase with resources in data fork) | +--------------+--------------------------------------------------------------------+ | otfcff | PS OpenType (CFF-based) font (OTF) | +--------------+--------------------------------------------------------------------+ | otfttf | PC TrueType/TT OpenType font (TTF) | +--------------+--------------------------------------------------------------------+ | pctype1 | PC Type 1 font (binary/PFB) | +--------------+--------------------------------------------------------------------+ | pcmm | PC MultipleMaster font (PFB) | +--------------+--------------------------------------------------------------------+ | pctype1ascii | PC Type 1 font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | pcmmascii | PC MultipleMaster font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | ufo1 | UFO format version 1 | +--------------+--------------------------------------------------------------------+ | ufo2 | UFO format version 2 | +--------------+--------------------------------------------------------------------+ | ufo3 | UFO format version 3 | +--------------+--------------------------------------------------------------------+ | unixascii | UNIX ASCII font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ """ formatToExtension = dict( # mactype1=None, macttf=".ttf", macttdfont=".dfont", otfcff=".otf", otfttf=".ttf", # 
pctype1=None, # pcmm=None, # pctype1ascii=None, # pcmmascii=None, ufo1=".ufo", ufo2=".ufo", ufo3=".ufo", unixascii=".pfa", ) return formatToExtension.get(format, fallbackFormat)
+--------------+--------------------------------------------------------------------+ | mactype1 | Mac Type 1 font (generates suitcase and LWFN file) | +--------------+--------------------------------------------------------------------+ | macttf | Mac TrueType font (generates suitcase) | +--------------+--------------------------------------------------------------------+ | macttdfont | Mac TrueType font (generates suitcase with resources in data fork) | +--------------+--------------------------------------------------------------------+ | otfcff | PS OpenType (CFF-based) font (OTF) | +--------------+--------------------------------------------------------------------+ | otfttf | PC TrueType/TT OpenType font (TTF) | +--------------+--------------------------------------------------------------------+ | pctype1 | PC Type 1 font (binary/PFB) | +--------------+--------------------------------------------------------------------+ | pcmm | PC MultipleMaster font (PFB) | +--------------+--------------------------------------------------------------------+ | pctype1ascii | PC Type 1 font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | pcmmascii | PC MultipleMaster font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | ufo1 | UFO format version 1 | +--------------+--------------------------------------------------------------------+ | ufo2 | UFO format version 2 | +--------------+--------------------------------------------------------------------+ | ufo3 | UFO format version 3 | +--------------+--------------------------------------------------------------------+ | unixascii | UNIX ASCII font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+
Below is the the instruction that describes the task: ### Input: +--------------+--------------------------------------------------------------------+ | mactype1 | Mac Type 1 font (generates suitcase and LWFN file) | +--------------+--------------------------------------------------------------------+ | macttf | Mac TrueType font (generates suitcase) | +--------------+--------------------------------------------------------------------+ | macttdfont | Mac TrueType font (generates suitcase with resources in data fork) | +--------------+--------------------------------------------------------------------+ | otfcff | PS OpenType (CFF-based) font (OTF) | +--------------+--------------------------------------------------------------------+ | otfttf | PC TrueType/TT OpenType font (TTF) | +--------------+--------------------------------------------------------------------+ | pctype1 | PC Type 1 font (binary/PFB) | +--------------+--------------------------------------------------------------------+ | pcmm | PC MultipleMaster font (PFB) | +--------------+--------------------------------------------------------------------+ | pctype1ascii | PC Type 1 font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | pcmmascii | PC MultipleMaster font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | ufo1 | UFO format version 1 | +--------------+--------------------------------------------------------------------+ | ufo2 | UFO format version 2 | +--------------+--------------------------------------------------------------------+ | ufo3 | UFO format version 3 | +--------------+--------------------------------------------------------------------+ | unixascii | UNIX ASCII font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ ### Response: def generateFormatToExtension(format, fallbackFormat): """ 
+--------------+--------------------------------------------------------------------+ | mactype1 | Mac Type 1 font (generates suitcase and LWFN file) | +--------------+--------------------------------------------------------------------+ | macttf | Mac TrueType font (generates suitcase) | +--------------+--------------------------------------------------------------------+ | macttdfont | Mac TrueType font (generates suitcase with resources in data fork) | +--------------+--------------------------------------------------------------------+ | otfcff | PS OpenType (CFF-based) font (OTF) | +--------------+--------------------------------------------------------------------+ | otfttf | PC TrueType/TT OpenType font (TTF) | +--------------+--------------------------------------------------------------------+ | pctype1 | PC Type 1 font (binary/PFB) | +--------------+--------------------------------------------------------------------+ | pcmm | PC MultipleMaster font (PFB) | +--------------+--------------------------------------------------------------------+ | pctype1ascii | PC Type 1 font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | pcmmascii | PC MultipleMaster font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | ufo1 | UFO format version 1 | +--------------+--------------------------------------------------------------------+ | ufo2 | UFO format version 2 | +--------------+--------------------------------------------------------------------+ | ufo3 | UFO format version 3 | +--------------+--------------------------------------------------------------------+ | unixascii | UNIX ASCII font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ """ formatToExtension = dict( # mactype1=None, macttf=".ttf", macttdfont=".dfont", otfcff=".otf", otfttf=".ttf", # pctype1=None, # pcmm=None, # pctype1ascii=None, # 
pcmmascii=None, ufo1=".ufo", ufo2=".ufo", ufo3=".ufo", unixascii=".pfa", ) return formatToExtension.get(format, fallbackFormat)
def rotMatrix2AxisAndAngle(R): """ stackoverflow.com/questions/12463487/obtain-rotation-axis-from-rotation-matrix-and-translation-vector-in-opencv R : 3x3 rotation matrix returns axis, angle """ angle = np.arccos((R[0, 0] + R[1, 1] + R[2, 2] - 1)/ 2) axis = np.array([ # x (R[2, 1] - R[1, 2])/ np.sqrt((R[2, 1] - R[1, 2]) ** 2 + (R[0, 2] - R[2, 0])**2 + (R[1, 0] - R[0, 1])**2), # y (R[0, 2] - R[2, 0])/ np.sqrt((R[2, 1] - R[1, 2]) ** 2 + (R[0, 2] - R[2, 0])**2 + (R[1, 0] - R[0, 1])**2), # z (R[1, 0] - R[0, 1])/ np.sqrt((R[2, 1] - R[1, 2])**2 + (R[0, 2] - R[2, 0])**2 + (R[1, 0] - R[0, 1])**2)]) return axis, angle
stackoverflow.com/questions/12463487/obtain-rotation-axis-from-rotation-matrix-and-translation-vector-in-opencv R : 3x3 rotation matrix returns axis, angle
Below is the the instruction that describes the task: ### Input: stackoverflow.com/questions/12463487/obtain-rotation-axis-from-rotation-matrix-and-translation-vector-in-opencv R : 3x3 rotation matrix returns axis, angle ### Response: def rotMatrix2AxisAndAngle(R): """ stackoverflow.com/questions/12463487/obtain-rotation-axis-from-rotation-matrix-and-translation-vector-in-opencv R : 3x3 rotation matrix returns axis, angle """ angle = np.arccos((R[0, 0] + R[1, 1] + R[2, 2] - 1)/ 2) axis = np.array([ # x (R[2, 1] - R[1, 2])/ np.sqrt((R[2, 1] - R[1, 2]) ** 2 + (R[0, 2] - R[2, 0])**2 + (R[1, 0] - R[0, 1])**2), # y (R[0, 2] - R[2, 0])/ np.sqrt((R[2, 1] - R[1, 2]) ** 2 + (R[0, 2] - R[2, 0])**2 + (R[1, 0] - R[0, 1])**2), # z (R[1, 0] - R[0, 1])/ np.sqrt((R[2, 1] - R[1, 2])**2 + (R[0, 2] - R[2, 0])**2 + (R[1, 0] - R[0, 1])**2)]) return axis, angle
def _resolve_path(path, manager_dict): """ Resolve a path based on a dictionary of manager prefixes. Returns a triple of (prefix, manager, manager_relative_path). """ path = normalize_api_path(path) parts = path.split('/') # Try to find a sub-manager for the first subdirectory. mgr = manager_dict.get(parts[0]) if mgr is not None: return parts[0], mgr, '/'.join(parts[1:]) # Try to find use the root manager, if one was supplied. mgr = manager_dict.get('') if mgr is not None: return '', mgr, path raise HTTPError( 404, "Couldn't resolve path [{path}] and " "no root manager supplied!".format(path=path) )
Resolve a path based on a dictionary of manager prefixes. Returns a triple of (prefix, manager, manager_relative_path).
Below is the the instruction that describes the task: ### Input: Resolve a path based on a dictionary of manager prefixes. Returns a triple of (prefix, manager, manager_relative_path). ### Response: def _resolve_path(path, manager_dict): """ Resolve a path based on a dictionary of manager prefixes. Returns a triple of (prefix, manager, manager_relative_path). """ path = normalize_api_path(path) parts = path.split('/') # Try to find a sub-manager for the first subdirectory. mgr = manager_dict.get(parts[0]) if mgr is not None: return parts[0], mgr, '/'.join(parts[1:]) # Try to find use the root manager, if one was supplied. mgr = manager_dict.get('') if mgr is not None: return '', mgr, path raise HTTPError( 404, "Couldn't resolve path [{path}] and " "no root manager supplied!".format(path=path) )
def _unpack_bytes(bytes): """ Unpack a set of bytes into an integer. First pads to 4 bytes. Little endian. """ if bytes == b'': return 0 int_length = 4 len_diff = int_length - len(bytes) bytes = bytes + len_diff * b'\x00' return struct.unpack("<L", bytes)[0]
Unpack a set of bytes into an integer. First pads to 4 bytes. Little endian.
Below is the the instruction that describes the task: ### Input: Unpack a set of bytes into an integer. First pads to 4 bytes. Little endian. ### Response: def _unpack_bytes(bytes): """ Unpack a set of bytes into an integer. First pads to 4 bytes. Little endian. """ if bytes == b'': return 0 int_length = 4 len_diff = int_length - len(bytes) bytes = bytes + len_diff * b'\x00' return struct.unpack("<L", bytes)[0]
def addCompletedJob(self, job, wallTime): """ Adds the shape of a completed job to the queue, allowing the scalar to use the last N completed jobs in factoring how many nodes are required in the cluster. :param toil.job.JobNode job: The memory, core and disk requirements of the completed job :param int wallTime: The wall-time taken to complete the job in seconds. """ #Adjust average runtimes to include this job. if job.jobName in self.jobNameToAvgRuntime: prevAvg = self.jobNameToAvgRuntime[job.jobName] prevNum = self.jobNameToNumCompleted[job.jobName] self.jobNameToAvgRuntime[job.jobName] = float(prevAvg*prevNum + wallTime)/(prevNum + 1) self.jobNameToNumCompleted[job.jobName] += 1 else: self.jobNameToAvgRuntime[job.jobName] = wallTime self.jobNameToNumCompleted[job.jobName] = 1 self.totalJobsCompleted += 1 self.totalAvgRuntime = float(self.totalAvgRuntime * (self.totalJobsCompleted - 1) + \ wallTime)/self.totalJobsCompleted
Adds the shape of a completed job to the queue, allowing the scalar to use the last N completed jobs in factoring how many nodes are required in the cluster. :param toil.job.JobNode job: The memory, core and disk requirements of the completed job :param int wallTime: The wall-time taken to complete the job in seconds.
Below is the the instruction that describes the task: ### Input: Adds the shape of a completed job to the queue, allowing the scalar to use the last N completed jobs in factoring how many nodes are required in the cluster. :param toil.job.JobNode job: The memory, core and disk requirements of the completed job :param int wallTime: The wall-time taken to complete the job in seconds. ### Response: def addCompletedJob(self, job, wallTime): """ Adds the shape of a completed job to the queue, allowing the scalar to use the last N completed jobs in factoring how many nodes are required in the cluster. :param toil.job.JobNode job: The memory, core and disk requirements of the completed job :param int wallTime: The wall-time taken to complete the job in seconds. """ #Adjust average runtimes to include this job. if job.jobName in self.jobNameToAvgRuntime: prevAvg = self.jobNameToAvgRuntime[job.jobName] prevNum = self.jobNameToNumCompleted[job.jobName] self.jobNameToAvgRuntime[job.jobName] = float(prevAvg*prevNum + wallTime)/(prevNum + 1) self.jobNameToNumCompleted[job.jobName] += 1 else: self.jobNameToAvgRuntime[job.jobName] = wallTime self.jobNameToNumCompleted[job.jobName] = 1 self.totalJobsCompleted += 1 self.totalAvgRuntime = float(self.totalAvgRuntime * (self.totalJobsCompleted - 1) + \ wallTime)/self.totalJobsCompleted
def _set_queues_interface(self, v, load=False): """ Setter method for queues_interface, mapped from YANG variable /openflow_state/queues_interface (container) If this variable is read-only (config: false) in the source YANG file, then _set_queues_interface is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queues_interface() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=queues_interface.queues_interface, is_container='container', presence=False, yang_name="queues-interface", rest_name="queues-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues-interface-queues-interface-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """queues_interface must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=queues_interface.queues_interface, is_container='container', presence=False, yang_name="queues-interface", rest_name="queues-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues-interface-queues-interface-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""", }) self.__queues_interface = t if hasattr(self, '_set'): self._set()
Setter method for queues_interface, mapped from YANG variable /openflow_state/queues_interface (container) If this variable is read-only (config: false) in the source YANG file, then _set_queues_interface is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queues_interface() directly.
Below is the the instruction that describes the task: ### Input: Setter method for queues_interface, mapped from YANG variable /openflow_state/queues_interface (container) If this variable is read-only (config: false) in the source YANG file, then _set_queues_interface is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queues_interface() directly. ### Response: def _set_queues_interface(self, v, load=False): """ Setter method for queues_interface, mapped from YANG variable /openflow_state/queues_interface (container) If this variable is read-only (config: false) in the source YANG file, then _set_queues_interface is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queues_interface() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=queues_interface.queues_interface, is_container='container', presence=False, yang_name="queues-interface", rest_name="queues-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues-interface-queues-interface-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """queues_interface must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=queues_interface.queues_interface, is_container='container', presence=False, yang_name="queues-interface", rest_name="queues-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues-interface-queues-interface-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', 
defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""", }) self.__queues_interface = t if hasattr(self, '_set'): self._set()
def create_network(self, action, n_name, **kwargs): """ Creates a configured network. :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param n_name: Network name. :type n_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict """ c_kwargs = self.get_network_create_kwargs(action, n_name, **kwargs) res = action.client.create_network(**c_kwargs) self._policy.network_names[action.client_name][n_name] = res['Id'] return res
Creates a configured network. :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param n_name: Network name. :type n_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict
Below is the the instruction that describes the task: ### Input: Creates a configured network. :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param n_name: Network name. :type n_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict ### Response: def create_network(self, action, n_name, **kwargs): """ Creates a configured network. :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param n_name: Network name. :type n_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict """ c_kwargs = self.get_network_create_kwargs(action, n_name, **kwargs) res = action.client.create_network(**c_kwargs) self._policy.network_names[action.client_name][n_name] = res['Id'] return res
def get_name(self, data): """ For non-specific queries, this will return the actual name in the result. """ if self.node.specific_attribute: return self.node.name name = data.get(self.predicate_var) if str(RDF.type) in [self.node.name, name]: return '$schema' if name.startswith(PRED): name = name[len(PRED):] return name
For non-specific queries, this will return the actual name in the result.
Below is the the instruction that describes the task: ### Input: For non-specific queries, this will return the actual name in the result. ### Response: def get_name(self, data): """ For non-specific queries, this will return the actual name in the result. """ if self.node.specific_attribute: return self.node.name name = data.get(self.predicate_var) if str(RDF.type) in [self.node.name, name]: return '$schema' if name.startswith(PRED): name = name[len(PRED):] return name
def handle(self, *args, **options): """ Run the specified Selenium test(s) the indicated number of times in the specified browser. """ browser_name = options['browser_name'] count = options['count'] if len(args) > 0: tests = list(args) else: tests = settings.SELENIUM_DEFAULT_TESTS # Kill any orphaned chromedriver processes process = Popen(['killall', 'chromedriver'], stderr=open(os.devnull, 'w')) process.wait() # Clear any old log and screenshots self.clean() docker = None sc_process = None selenium_process = None if options['docker']: if browser_name not in ['chrome', 'firefox']: self.stdout.write('Only chrome and firefox can currently be run in a Docker container') return docker = DockerSelenium(browser=browser_name, port=settings.SELENIUM_DOCKER_PORT, tag=settings.SELENIUM_DOCKER_TAG, debug=settings.SELENIUM_DOCKER_DEBUG) elif 'platform' in options and settings.SELENIUM_SAUCE_CONNECT_PATH: running, sc_process = self.verify_sauce_connect_is_running(options) if not running: return elif browser_name in ['opera', 'safari']: running, selenium_process = self.verify_selenium_server_is_running() if not running: return elif browser_name in ['ipad', 'iphone']: if not self.verify_appium_is_running(): return # Make it so django-nose won't have nosetests choke on our parameters TestRunner = get_runner(django_settings) if hasattr(TestRunner, 'django_opts'): for option in OPTIONS: TestRunner.django_opts.extend(option[0]) # Configure and run the tests try: if docker: docker.start() options['command_executor'] = docker.command_executor() self.update_environment(options) self.run_tests(tests, browser_name, count) finally: # Stop the Selenium Docker container, if running if docker and docker.container_id: docker.stop() # Kill Sauce Connect, if running if sc_process: sc_process.kill() # Kill the Selenium standalone server, if running if selenium_process: selenium_process.kill()
Run the specified Selenium test(s) the indicated number of times in the specified browser.
Below is the the instruction that describes the task: ### Input: Run the specified Selenium test(s) the indicated number of times in the specified browser. ### Response: def handle(self, *args, **options): """ Run the specified Selenium test(s) the indicated number of times in the specified browser. """ browser_name = options['browser_name'] count = options['count'] if len(args) > 0: tests = list(args) else: tests = settings.SELENIUM_DEFAULT_TESTS # Kill any orphaned chromedriver processes process = Popen(['killall', 'chromedriver'], stderr=open(os.devnull, 'w')) process.wait() # Clear any old log and screenshots self.clean() docker = None sc_process = None selenium_process = None if options['docker']: if browser_name not in ['chrome', 'firefox']: self.stdout.write('Only chrome and firefox can currently be run in a Docker container') return docker = DockerSelenium(browser=browser_name, port=settings.SELENIUM_DOCKER_PORT, tag=settings.SELENIUM_DOCKER_TAG, debug=settings.SELENIUM_DOCKER_DEBUG) elif 'platform' in options and settings.SELENIUM_SAUCE_CONNECT_PATH: running, sc_process = self.verify_sauce_connect_is_running(options) if not running: return elif browser_name in ['opera', 'safari']: running, selenium_process = self.verify_selenium_server_is_running() if not running: return elif browser_name in ['ipad', 'iphone']: if not self.verify_appium_is_running(): return # Make it so django-nose won't have nosetests choke on our parameters TestRunner = get_runner(django_settings) if hasattr(TestRunner, 'django_opts'): for option in OPTIONS: TestRunner.django_opts.extend(option[0]) # Configure and run the tests try: if docker: docker.start() options['command_executor'] = docker.command_executor() self.update_environment(options) self.run_tests(tests, browser_name, count) finally: # Stop the Selenium Docker container, if running if docker and docker.container_id: docker.stop() # Kill Sauce Connect, if running if sc_process: sc_process.kill() # Kill the Selenium standalone 
server, if running if selenium_process: selenium_process.kill()
def from_json_file(schema_file, validate=True): # type: (TextIO, bool) -> Schema """ Load a Schema object from a json file. :param schema_file: A JSON file containing the schema. :param validate: (default True) Raise an exception if the schema does not conform to the master schema. :raises SchemaError: When the schema is invalid. :return: the Schema """ try: schema_dict = json.load(schema_file) except ValueError as e: # In Python 3 we can be more specific # with json.decoder.JSONDecodeError, # but that doesn't exist in Python 2. msg = 'The schema is not a valid JSON file.' raise_from(SchemaError(msg), e) return from_json_dict(schema_dict, validate=validate)
Load a Schema object from a json file. :param schema_file: A JSON file containing the schema. :param validate: (default True) Raise an exception if the schema does not conform to the master schema. :raises SchemaError: When the schema is invalid. :return: the Schema
Below is the the instruction that describes the task: ### Input: Load a Schema object from a json file. :param schema_file: A JSON file containing the schema. :param validate: (default True) Raise an exception if the schema does not conform to the master schema. :raises SchemaError: When the schema is invalid. :return: the Schema ### Response: def from_json_file(schema_file, validate=True): # type: (TextIO, bool) -> Schema """ Load a Schema object from a json file. :param schema_file: A JSON file containing the schema. :param validate: (default True) Raise an exception if the schema does not conform to the master schema. :raises SchemaError: When the schema is invalid. :return: the Schema """ try: schema_dict = json.load(schema_file) except ValueError as e: # In Python 3 we can be more specific # with json.decoder.JSONDecodeError, # but that doesn't exist in Python 2. msg = 'The schema is not a valid JSON file.' raise_from(SchemaError(msg), e) return from_json_dict(schema_dict, validate=validate)
def scan(self, module, onerror=None, ignore=None): """Scan the given module object for L{Method}s and register them.""" from venusian import Scanner scanner = Scanner(registry=self) kwargs = {"onerror": onerror, "categories": ["method"]} if ignore is not None: # Only pass it if specified, for backward compatibility kwargs["ignore"] = ignore scanner.scan(module, **kwargs)
Scan the given module object for L{Method}s and register them.
Below is the the instruction that describes the task: ### Input: Scan the given module object for L{Method}s and register them. ### Response: def scan(self, module, onerror=None, ignore=None): """Scan the given module object for L{Method}s and register them.""" from venusian import Scanner scanner = Scanner(registry=self) kwargs = {"onerror": onerror, "categories": ["method"]} if ignore is not None: # Only pass it if specified, for backward compatibility kwargs["ignore"] = ignore scanner.scan(module, **kwargs)
def enabled(self): """ read or write the child sub-reaper flag of the current process This property behaves in the following manner: * If a read is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) then no further attempts are made and the outcome is ``False``. * If a read is attempted and the current status is ``SR_UNKNOWN`` then a ``prctl(PR_GET_CHILD_SUBREAPER, ...)`` call is made and the outcome depends on the returned value. If prctl fails then status is set to ``SR_UNSUPPORTED`` and the return value is ``False``. If the prctl call succeeds then status is set to either ``SR_ENABLED`` or ``SR_DISABLED`` and ``True`` or ``False`` is returned, respectively. * If a write is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) *and* the write would have enabled the flag, a ValueError is raised with an appropriate message. Otherwise a write is attempted. If the attempt to enable the flag fails a ValueError is raised, just as in the previous case. * If a write intending to disable the flag fails then this failure is silently ignored but status is set to ``SR_UNSUPPORTED``. * If a write succeeds then the status is set accordingly to ``SR_ENABLED`` or ``SR_DISABLED``, depending on the value written ``True`` or ``False`` respectively. In other words, this property behaves as if it was really calling prctl() but it is not going to repeat operations that will always fail. Nor will it ignore failures silently where that matters. """ if self._status == self.SR_UNSUPPORTED: return False status = c_int() try: prctl(PR_GET_CHILD_SUBREAPER, addressof(status), 0, 0, 0) except OSError: self._status = self.SR_UNSUPPORTED else: self._status = self.SR_ENABLED if status else self.SR_DISABLED return self._status == self.SR_ENABLED
read or write the child sub-reaper flag of the current process This property behaves in the following manner: * If a read is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) then no further attempts are made and the outcome is ``False``. * If a read is attempted and the current status is ``SR_UNKNOWN`` then a ``prctl(PR_GET_CHILD_SUBREAPER, ...)`` call is made and the outcome depends on the returned value. If prctl fails then status is set to ``SR_UNSUPPORTED`` and the return value is ``False``. If the prctl call succeeds then status is set to either ``SR_ENABLED`` or ``SR_DISABLED`` and ``True`` or ``False`` is returned, respectively. * If a write is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) *and* the write would have enabled the flag, a ValueError is raised with an appropriate message. Otherwise a write is attempted. If the attempt to enable the flag fails a ValueError is raised, just as in the previous case. * If a write intending to disable the flag fails then this failure is silently ignored but status is set to ``SR_UNSUPPORTED``. * If a write succeeds then the status is set accordingly to ``SR_ENABLED`` or ``SR_DISABLED``, depending on the value written ``True`` or ``False`` respectively. In other words, this property behaves as if it was really calling prctl() but it is not going to repeat operations that will always fail. Nor will it ignore failures silently where that matters.
Below is the the instruction that describes the task: ### Input: read or write the child sub-reaper flag of the current process This property behaves in the following manner: * If a read is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) then no further attempts are made and the outcome is ``False``. * If a read is attempted and the current status is ``SR_UNKNOWN`` then a ``prctl(PR_GET_CHILD_SUBREAPER, ...)`` call is made and the outcome depends on the returned value. If prctl fails then status is set to ``SR_UNSUPPORTED`` and the return value is ``False``. If the prctl call succeeds then status is set to either ``SR_ENABLED`` or ``SR_DISABLED`` and ``True`` or ``False`` is returned, respectively. * If a write is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) *and* the write would have enabled the flag, a ValueError is raised with an appropriate message. Otherwise a write is attempted. If the attempt to enable the flag fails a ValueError is raised, just as in the previous case. * If a write intending to disable the flag fails then this failure is silently ignored but status is set to ``SR_UNSUPPORTED``. * If a write succeeds then the status is set accordingly to ``SR_ENABLED`` or ``SR_DISABLED``, depending on the value written ``True`` or ``False`` respectively. In other words, this property behaves as if it was really calling prctl() but it is not going to repeat operations that will always fail. Nor will it ignore failures silently where that matters. ### Response: def enabled(self): """ read or write the child sub-reaper flag of the current process This property behaves in the following manner: * If a read is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) then no further attempts are made and the outcome is ``False``. 
* If a read is attempted and the current status is ``SR_UNKNOWN`` then a ``prctl(PR_GET_CHILD_SUBREAPER, ...)`` call is made and the outcome depends on the returned value. If prctl fails then status is set to ``SR_UNSUPPORTED`` and the return value is ``False``. If the prctl call succeeds then status is set to either ``SR_ENABLED`` or ``SR_DISABLED`` and ``True`` or ``False`` is returned, respectively. * If a write is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) *and* the write would have enabled the flag, a ValueError is raised with an appropriate message. Otherwise a write is attempted. If the attempt to enable the flag fails a ValueError is raised, just as in the previous case. * If a write intending to disable the flag fails then this failure is silently ignored but status is set to ``SR_UNSUPPORTED``. * If a write succeeds then the status is set accordingly to ``SR_ENABLED`` or ``SR_DISABLED``, depending on the value written ``True`` or ``False`` respectively. In other words, this property behaves as if it was really calling prctl() but it is not going to repeat operations that will always fail. Nor will it ignore failures silently where that matters. """ if self._status == self.SR_UNSUPPORTED: return False status = c_int() try: prctl(PR_GET_CHILD_SUBREAPER, addressof(status), 0, 0, 0) except OSError: self._status = self.SR_UNSUPPORTED else: self._status = self.SR_ENABLED if status else self.SR_DISABLED return self._status == self.SR_ENABLED
async def identity_of(client: Client, search: str) -> dict: """ GET Identity data written in the blockchain :param client: Client to connect to the api :param search: UID or public key :return: """ return await client.get(MODULE + '/identity-of/%s' % search, schema=IDENTITY_OF_SCHEMA)
GET Identity data written in the blockchain :param client: Client to connect to the api :param search: UID or public key :return:
Below is the the instruction that describes the task: ### Input: GET Identity data written in the blockchain :param client: Client to connect to the api :param search: UID or public key :return: ### Response: async def identity_of(client: Client, search: str) -> dict: """ GET Identity data written in the blockchain :param client: Client to connect to the api :param search: UID or public key :return: """ return await client.get(MODULE + '/identity-of/%s' % search, schema=IDENTITY_OF_SCHEMA)
def sig_handler(self, sig, _): """ Handle the signal sent to the process :param sig: Signal set to the process :param _: Frame is not being used """ import tornado.ioloop from tornado.process import task_id tid = task_id() pid = os.getpid() if tid is None: logger.warning("main process (pid %s) caught signal: %s" % (pid, sig)) else: logger.warning("child %s (pid %s) caught signal: %s" % (tid, pid, sig)) tornado.ioloop.IOLoop.current().add_callback(self.shutdown)
Handle the signal sent to the process :param sig: Signal set to the process :param _: Frame is not being used
Below is the the instruction that describes the task: ### Input: Handle the signal sent to the process :param sig: Signal set to the process :param _: Frame is not being used ### Response: def sig_handler(self, sig, _): """ Handle the signal sent to the process :param sig: Signal set to the process :param _: Frame is not being used """ import tornado.ioloop from tornado.process import task_id tid = task_id() pid = os.getpid() if tid is None: logger.warning("main process (pid %s) caught signal: %s" % (pid, sig)) else: logger.warning("child %s (pid %s) caught signal: %s" % (tid, pid, sig)) tornado.ioloop.IOLoop.current().add_callback(self.shutdown)
def generate(self): """ Generates a new token for this column based on its bit length. This method will not ensure uniqueness in the model itself, that should be checked against the model records in the database first. :return: <str> """ try: model = self.schema().model() except AttributeError: return os.urandom(self.__bits).encode('hex') else: while True: token = os.urandom(self.__bits).encode('hex') if model.select(where=orb.Query(self) == token).count() == 0: return token
Generates a new token for this column based on its bit length. This method will not ensure uniqueness in the model itself, that should be checked against the model records in the database first. :return: <str>
Below is the the instruction that describes the task: ### Input: Generates a new token for this column based on its bit length. This method will not ensure uniqueness in the model itself, that should be checked against the model records in the database first. :return: <str> ### Response: def generate(self): """ Generates a new token for this column based on its bit length. This method will not ensure uniqueness in the model itself, that should be checked against the model records in the database first. :return: <str> """ try: model = self.schema().model() except AttributeError: return os.urandom(self.__bits).encode('hex') else: while True: token = os.urandom(self.__bits).encode('hex') if model.select(where=orb.Query(self) == token).count() == 0: return token
def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Passes the result to BayesOpt unless early terminated or errored""" if result: self.optimizer.register( params=self._live_trial_mapping[trial_id], target=result[self._reward_attr]) del self._live_trial_mapping[trial_id]
Passes the result to BayesOpt unless early terminated or errored
Below is the the instruction that describes the task: ### Input: Passes the result to BayesOpt unless early terminated or errored ### Response: def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Passes the result to BayesOpt unless early terminated or errored""" if result: self.optimizer.register( params=self._live_trial_mapping[trial_id], target=result[self._reward_attr]) del self._live_trial_mapping[trial_id]
def install_remote(self): """Download, extract and install NApp.""" package, pkg_folder = None, None try: package = self._download() pkg_folder = self._extract(package) napp_folder = self._get_local_folder(pkg_folder) dst = self._installed / self.user / self.napp self._check_module(dst.parent) shutil.move(str(napp_folder), str(dst)) finally: # Delete temporary files if package: Path(package).unlink() if pkg_folder and pkg_folder.exists(): shutil.rmtree(str(pkg_folder))
Download, extract and install NApp.
Below is the the instruction that describes the task: ### Input: Download, extract and install NApp. ### Response: def install_remote(self): """Download, extract and install NApp.""" package, pkg_folder = None, None try: package = self._download() pkg_folder = self._extract(package) napp_folder = self._get_local_folder(pkg_folder) dst = self._installed / self.user / self.napp self._check_module(dst.parent) shutil.move(str(napp_folder), str(dst)) finally: # Delete temporary files if package: Path(package).unlink() if pkg_folder and pkg_folder.exists(): shutil.rmtree(str(pkg_folder))
def clear_choice(self, choice): """stub""" if len(self.my_osid_object_form._my_map['choices']) == 0: raise IllegalState('there are currently no choices defined for this question') if (len(self.my_osid_object_form._my_map['choices']) == 1 and choice in self.my_osid_object_form._my_map['choices']): raise IllegalState() self.my_osid_object_form._my_map['choices'] = \ [c for c in self.my_osid_object_form._my_map['choices'] if c != choice]
stub
Below is the the instruction that describes the task: ### Input: stub ### Response: def clear_choice(self, choice): """stub""" if len(self.my_osid_object_form._my_map['choices']) == 0: raise IllegalState('there are currently no choices defined for this question') if (len(self.my_osid_object_form._my_map['choices']) == 1 and choice in self.my_osid_object_form._my_map['choices']): raise IllegalState() self.my_osid_object_form._my_map['choices'] = \ [c for c in self.my_osid_object_form._my_map['choices'] if c != choice]
def three_digit(number): """ Add 0s to inputs that their length is less than 3. :param number: The number to convert :type number: int :returns: String :example: >>> three_digit(1) '001' """ number = str(number) if len(number) == 1: return u'00%s' % number elif len(number) == 2: return u'0%s' % number else: return number
Add 0s to inputs that their length is less than 3. :param number: The number to convert :type number: int :returns: String :example: >>> three_digit(1) '001'
Below is the the instruction that describes the task: ### Input: Add 0s to inputs that their length is less than 3. :param number: The number to convert :type number: int :returns: String :example: >>> three_digit(1) '001' ### Response: def three_digit(number): """ Add 0s to inputs that their length is less than 3. :param number: The number to convert :type number: int :returns: String :example: >>> three_digit(1) '001' """ number = str(number) if len(number) == 1: return u'00%s' % number elif len(number) == 2: return u'0%s' % number else: return number
def _search_in_bases(type_): """Implementation detail.""" found = False for base_type in type_.declaration.bases: try: found = internal_type_traits.get_by_name( base_type.related_class, "element_type") except runtime_errors.declaration_not_found_t: pass if found: return found raise RuntimeError( ("Unable to find 'element_type' declaration '%s'" "in type '%s'.") % type_.decl_string)
Implementation detail.
Below is the the instruction that describes the task: ### Input: Implementation detail. ### Response: def _search_in_bases(type_): """Implementation detail.""" found = False for base_type in type_.declaration.bases: try: found = internal_type_traits.get_by_name( base_type.related_class, "element_type") except runtime_errors.declaration_not_found_t: pass if found: return found raise RuntimeError( ("Unable to find 'element_type' declaration '%s'" "in type '%s'.") % type_.decl_string)
def _check_and_replace_parser_args(parser, section, option, rename_func, make_dirs=True): """ Searches for parser settings that define filenames. If such settings are found, they are renamed according to the wildcard rules. Moreover, it is also tried to create the corresponding folders. :param parser: A config parser :param section: A config section :param option: The section option :param rename_func: A function to rename found files :param make_dirs: If the directories of the file should be created. """ args = parser.get(section, option, raw=True) strings = get_strings(args) replace = False for string in strings: isfilename = any(x in string for x in FILENAME_INDICATORS) if isfilename: newstring = rename_func(string) if make_dirs: try_make_dirs(newstring) # To work with windows path specifications we need this replacement: raw_string = string.replace('\\', '\\\\') raw_newstring = newstring.replace('\\', '\\\\') args = args.replace(raw_string, raw_newstring) replace = True if replace: parser.set(section, option, args)
Searches for parser settings that define filenames. If such settings are found, they are renamed according to the wildcard rules. Moreover, it is also tried to create the corresponding folders. :param parser: A config parser :param section: A config section :param option: The section option :param rename_func: A function to rename found files :param make_dirs: If the directories of the file should be created.
Below is the the instruction that describes the task: ### Input: Searches for parser settings that define filenames. If such settings are found, they are renamed according to the wildcard rules. Moreover, it is also tried to create the corresponding folders. :param parser: A config parser :param section: A config section :param option: The section option :param rename_func: A function to rename found files :param make_dirs: If the directories of the file should be created. ### Response: def _check_and_replace_parser_args(parser, section, option, rename_func, make_dirs=True): """ Searches for parser settings that define filenames. If such settings are found, they are renamed according to the wildcard rules. Moreover, it is also tried to create the corresponding folders. :param parser: A config parser :param section: A config section :param option: The section option :param rename_func: A function to rename found files :param make_dirs: If the directories of the file should be created. """ args = parser.get(section, option, raw=True) strings = get_strings(args) replace = False for string in strings: isfilename = any(x in string for x in FILENAME_INDICATORS) if isfilename: newstring = rename_func(string) if make_dirs: try_make_dirs(newstring) # To work with windows path specifications we need this replacement: raw_string = string.replace('\\', '\\\\') raw_newstring = newstring.replace('\\', '\\\\') args = args.replace(raw_string, raw_newstring) replace = True if replace: parser.set(section, option, args)
def help(obj, visualization=True, ansi=True, backend=None, recursive=False, pattern=None): """ Extended version of the built-in help that supports parameterized functions and objects. A pattern (regular expression) may be used to filter the output and if recursive is set to True, documentation for the supplied object is shown. Note that the recursive option will only work with an object instance and not a class. If ansi is set to False, all ANSI color codes are stripped out. """ backend = backend if backend else Store.current_backend info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization, recursive=recursive, pattern=pattern, elements=elements_list) msg = ("\nTo view the visualization options applicable to this " "object or class, use:\n\n" " holoviews.help(obj, visualization=True)\n\n") if info: print((msg if visualization is False else '') + info) else: pydoc.help(obj)
Extended version of the built-in help that supports parameterized functions and objects. A pattern (regular expression) may be used to filter the output and if recursive is set to True, documentation for the supplied object is shown. Note that the recursive option will only work with an object instance and not a class. If ansi is set to False, all ANSI color codes are stripped out.
Below is the the instruction that describes the task: ### Input: Extended version of the built-in help that supports parameterized functions and objects. A pattern (regular expression) may be used to filter the output and if recursive is set to True, documentation for the supplied object is shown. Note that the recursive option will only work with an object instance and not a class. If ansi is set to False, all ANSI color codes are stripped out. ### Response: def help(obj, visualization=True, ansi=True, backend=None, recursive=False, pattern=None): """ Extended version of the built-in help that supports parameterized functions and objects. A pattern (regular expression) may be used to filter the output and if recursive is set to True, documentation for the supplied object is shown. Note that the recursive option will only work with an object instance and not a class. If ansi is set to False, all ANSI color codes are stripped out. """ backend = backend if backend else Store.current_backend info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization, recursive=recursive, pattern=pattern, elements=elements_list) msg = ("\nTo view the visualization options applicable to this " "object or class, use:\n\n" " holoviews.help(obj, visualization=True)\n\n") if info: print((msg if visualization is False else '') + info) else: pydoc.help(obj)
def abs_path(rel_path): """Convert a path that is relative to the module from which this function is called, to an absolute path. Args: rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``. """ # noinspection PyProtectedMember return os.path.abspath( os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), rel_path) )
Convert a path that is relative to the module from which this function is called, to an absolute path. Args: rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``.
Below is the the instruction that describes the task: ### Input: Convert a path that is relative to the module from which this function is called, to an absolute path. Args: rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``. ### Response: def abs_path(rel_path): """Convert a path that is relative to the module from which this function is called, to an absolute path. Args: rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``. """ # noinspection PyProtectedMember return os.path.abspath( os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), rel_path) )
def get_closest_points(self, max_distance=None, origin_index=0, origin_raw=None): """ Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance. """ if not self.dict_response['distance']['value']: self.get_distance_values() if origin_raw: origin = copy.deepcopy(self.dict_response['distance']['value'][origin_raw]) else: origin = copy.deepcopy(self.dict_response['distance']['value'][self.origins[origin_index]]) tmp_origin = copy.deepcopy(origin) if max_distance is not None: for k, v in tmp_origin.iteritems(): if v > max_distance or v == 'ZERO_RESULTS': del(origin[k]) return origin
Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance.
Below is the the instruction that describes the task: ### Input: Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance. ### Response: def get_closest_points(self, max_distance=None, origin_index=0, origin_raw=None): """ Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance. """ if not self.dict_response['distance']['value']: self.get_distance_values() if origin_raw: origin = copy.deepcopy(self.dict_response['distance']['value'][origin_raw]) else: origin = copy.deepcopy(self.dict_response['distance']['value'][self.origins[origin_index]]) tmp_origin = copy.deepcopy(origin) if max_distance is not None: for k, v in tmp_origin.iteritems(): if v > max_distance or v == 'ZERO_RESULTS': del(origin[k]) return origin
def set_connections_params(
        self, harakiri=None, timeout_socket=None, retry_delay=None, retry_max=None,
        use_xclient=None):
    """Configure connection-related behaviour of the raw router.

    :param int harakiri: Gateway harakiri timeout (seconds).

    :param int timeout_socket: Node socket timeout (seconds). Default: 60.

    :param int retry_delay: Seconds to wait before retrying connections to
        dead static nodes. Default: 30.

    :param int retry_max: Maximum number of retries/fallbacks to other
        nodes. Default: 3.

    :param bool use_xclient: Use the xclient protocol to pass the client
        address.

    """
    # Forward everything except the two raw-router-specific options to the
    # base implementation.
    common_params = filter_locals(locals(), ['retry_max', 'use_xclient'])
    super(RouterRaw, self).set_connections_params(**common_params)

    # The remaining options map onto raw-router option aliases.
    self._set_aliased('max-retries', retry_max)
    self._set_aliased('xclient', use_xclient)

    return self
Sets connection-related parameters. :param int harakiri: Set gateway harakiri timeout (seconds). :param int timeout_socket: Node socket timeout (seconds). Default: 60. :param int retry_delay: Retry connections to dead static nodes after the specified amount of seconds. Default: 30. :param int retry_max: Maximum number of retries/fallbacks to other nodes. Default: 3. :param bool use_xclient: Use the xclient protocol to pass the client address.
Below is the the instruction that describes the task: ### Input: Sets connection-related parameters. :param int harakiri: Set gateway harakiri timeout (seconds). :param int timeout_socket: Node socket timeout (seconds). Default: 60. :param int retry_delay: Retry connections to dead static nodes after the specified amount of seconds. Default: 30. :param int retry_max: Maximum number of retries/fallbacks to other nodes. Default: 3. :param bool use_xclient: Use the xclient protocol to pass the client address. ### Response: def set_connections_params( self, harakiri=None, timeout_socket=None, retry_delay=None, retry_max=None, use_xclient=None): """Sets connection-related parameters. :param int harakiri: Set gateway harakiri timeout (seconds). :param int timeout_socket: Node socket timeout (seconds). Default: 60. :param int retry_delay: Retry connections to dead static nodes after the specified amount of seconds. Default: 30. :param int retry_max: Maximum number of retries/fallbacks to other nodes. Default: 3. :param bool use_xclient: Use the xclient protocol to pass the client address. """ super(RouterRaw, self).set_connections_params(**filter_locals(locals(), ['retry_max', 'use_xclient'])) self._set_aliased('max-retries', retry_max) self._set_aliased('xclient', use_xclient) return self
def knock_out(self):
    """Mark this gene as non-functional.

    Every associated reaction that is thereby rendered non-functional has
    its bounds forced to (0, 0). When executed within a model context the
    change is reverted upon exit.
    """
    self.functional = False
    # Zero out only the reactions that are no longer functional.
    dead_reactions = (rxn for rxn in self.reactions if not rxn.functional)
    for rxn in dead_reactions:
        rxn.bounds = (0, 0)
Knockout gene by marking it as non-functional and setting all associated reactions bounds to zero. The change is reverted upon exit if executed within the model as context.
Below is the the instruction that describes the task: ### Input: Knockout gene by marking it as non-functional and setting all associated reactions bounds to zero. The change is reverted upon exit if executed within the model as context. ### Response: def knock_out(self): """Knockout gene by marking it as non-functional and setting all associated reactions bounds to zero. The change is reverted upon exit if executed within the model as context. """ self.functional = False for reaction in self.reactions: if not reaction.functional: reaction.bounds = (0, 0)
def add_dependency(self, depend):
    """Register explicit dependencies of this node.

    Raises a UserError when *depend* (or one of its members) is not a
    Node.
    """
    try:
        self._add_child(self.depends, self.depends_set, depend)
    except TypeError as exc:
        offender = exc.args[0]
        # Render a list of offenders element-by-element, a scalar as-is.
        if SCons.Util.is_List(offender):
            shown = list(map(str, offender))
        else:
            shown = str(offender)
        raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), shown, type(offender)))
Adds dependencies.
Below is the the instruction that describes the task: ### Input: Adds dependencies. ### Response: def add_dependency(self, depend): """Adds dependencies.""" try: self._add_child(self.depends, self.depends_set, depend) except TypeError as e: e = e.args[0] if SCons.Util.is_List(e): s = list(map(str, e)) else: s = str(e) raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
def figsize(x=8, y=7., aspect=1.):
    """Override matplotlib's default figure size.

    ::Arguments::
        x (float): figure width
        y (float): figure height
        aspect (float): scalar applied to the width
    """
    # Push the adjusted size into the global rc configuration.
    adjusted = (x * aspect, y)
    mpl.rcParams.update({'figure.figsize': adjusted})
manually set the default figure size of plots ::Arguments:: x (float): x-axis size y (float): y-axis size aspect (float): aspect ratio scalar
Below is the the instruction that describes the task: ### Input: manually set the default figure size of plots ::Arguments:: x (float): x-axis size y (float): y-axis size aspect (float): aspect ratio scalar ### Response: def figsize(x=8, y=7., aspect=1.): """ manually set the default figure size of plots ::Arguments:: x (float): x-axis size y (float): y-axis size aspect (float): aspect ratio scalar """ # update rcparams with adjusted figsize params mpl.rcParams.update({'figure.figsize': (x*aspect, y)})
def create_from_json(cls, json_data):
    """Deserialize msa json data into a Msa object

    Args:
        json_data (dict): The json data for this msa

    Returns:
        Msa object (or an instance of the subclass this is called on)
    """
    # Instantiate via ``cls`` so subclasses deserialize into their own type
    # (the original hard-coded ``Msa()``, which broke subclassing).
    msa = cls()
    msa.msa = json_data["msa_info"]["msa"]
    # ``meta`` is optional in the payload; default to None when absent.
    msa.meta = json_data.get("meta")
    msa.component_results = _create_component_results(json_data, "msa_info")
    return msa
Deserialize msa json data into a Msa object Args: json_data (dict): The json data for this msa Returns: Msa object
Below is the the instruction that describes the task: ### Input: Deserialize msa json data into a Msa object Args: json_data (dict): The json data for this msa Returns: Msa object ### Response: def create_from_json(cls, json_data): """Deserialize msa json data into a Msa object Args: json_data (dict): The json data for this msa Returns: Msa object """ msa = Msa() msa.msa = json_data["msa_info"]["msa"] msa.meta = json_data["meta"] if "meta" in json_data else None msa.component_results = _create_component_results(json_data, "msa_info") return msa
def first(self) -> "QuerySet":
    """
    Return a copy of this queryset restricted to a single object, which
    will be returned directly instead of as a one-element list.
    """
    clone = self._clone()
    clone._limit, clone._single = 1, True
    return clone
Limit queryset to one object and return one object instead of list.
Below is the the instruction that describes the task: ### Input: Limit queryset to one object and return one object instead of list. ### Response: def first(self) -> "QuerySet": """ Limit queryset to one object and return one object instead of list. """ queryset = self._clone() queryset._limit = 1 queryset._single = True return queryset
def get_online_version ():
    """Download update info and parse it.

    Returns:
        Tuple ``(version, url)`` parsed from the update document; either
        element may be None when its tag is missing. On download failure
        returns ``(content, info)`` from ``get_content`` (i.e.
        ``(None, info)``).
    """
    # prevent getting a cached answer
    headers = {'Pragma': 'no-cache', 'Cache-Control': 'no-cache'}
    content, info = get_content(UPDATE_URL, addheaders=headers)
    if content is None:
        return content, info
    version, url = None, None
    for line in content.splitlines():
        if line.startswith(VERSION_TAG):
            version = line.split(':', 1)[1].strip()
        elif line.startswith(URL_TAG):
            url = line.split(':', 1)[1].strip()
    # Substitute the version only after the whole document is parsed, so
    # the result does not depend on tag order and a missing version tag
    # cannot crash str.replace() with a None argument (the original
    # substituted inside the loop and did both).
    if url is not None and version is not None:
        url = url.replace('${version}', version)
    return version, url
Download update info and parse it.
Below is the the instruction that describes the task: ### Input: Download update info and parse it. ### Response: def get_online_version (): """Download update info and parse it.""" # prevent getting a cached answer headers = {'Pragma': 'no-cache', 'Cache-Control': 'no-cache'} content, info = get_content(UPDATE_URL, addheaders=headers) if content is None: return content, info version, url = None, None for line in content.splitlines(): if line.startswith(VERSION_TAG): version = line.split(':', 1)[1].strip() elif line.startswith(URL_TAG): url = line.split(':', 1)[1].strip() url = url.replace('${version}', version) return version, url
def get_center_of_mass(self):
    """Return the (x, y) center of mass of all recorded points.

    This is generally different from the center of the bounding box:
    picture a black square plus a single far-away dot.
    """
    # Flatten all strokes into one point list, then average per axis.
    points = [p for stroke in self.get_pointlist() for p in stroke]
    count = float(len(points))
    mean_x = sum(p['x'] for p in points) / count
    mean_y = sum(p['y'] for p in points) / count
    return (mean_x, mean_y)
Get a tuple (x,y) that is the center of mass. The center of mass is not necessarily the same as the center of the bounding box. Imagine a black square and a single dot wide outside of the square.
Below is the the instruction that describes the task: ### Input: Get a tuple (x,y) that is the center of mass. The center of mass is not necessarily the same as the center of the bounding box. Imagine a black square and a single dot wide outside of the square. ### Response: def get_center_of_mass(self): """ Get a tuple (x,y) that is the center of mass. The center of mass is not necessarily the same as the center of the bounding box. Imagine a black square and a single dot wide outside of the square. """ xsum, ysum, counter = 0., 0., 0 for stroke in self.get_pointlist(): for point in stroke: xsum += point['x'] ysum += point['y'] counter += 1 return (xsum / counter, ysum / counter)
def get_fields(model):
    """
    Return a model's knockout_fields, or the default set of field names.
    """
    try:
        # Prefer an explicit declaration on the model when available.
        if hasattr(model, "knockout_fields"):
            return model.knockout_fields()
        try:
            return model_to_dict(model).keys()
        except Exception:
            # model_to_dict can fail on unsaved/abstract models.
            return model._meta.get_fields()
    # Crash proofing
    except Exception as exc:
        logger.exception(exc)
        return []
Returns a Model's knockout_fields, or the default set of field names.
Below is the the instruction that describes the task: ### Input: Returns a Model's knockout_fields, or the default set of field names. ### Response: def get_fields(model): """ Returns a Model's knockout_fields, or the default set of field names. """ try: if hasattr(model, "knockout_fields"): fields = model.knockout_fields() else: try: fields = model_to_dict(model).keys() except Exception as e: fields = model._meta.get_fields() return fields # Crash proofing except Exception as e: logger.exception(e) return []
def setsweep(self, sweep=0, channel=0):
    """set the sweep and channel of an ABF. Both start at 0.

    Loads the selected analog signal from the neo ABF block and caches
    sweep-level metadata on the instance: sampling rate, units, time
    base, raw data (``sweepY``), and optionally its derivative
    (``sweepD``).

    Args:
        sweep (int): sweep index; negative values count back from the end.
        channel (int): channel index within the sweep.
    """
    # Coerce the sweep index; bail out (with a log entry) if it is not
    # convertible to an int.
    try:
        sweep=int(sweep)
    except:
        self.log.error("trying to set sweep to [%s]",sweep)
        return
    if sweep<0:
        sweep=self.sweeps-1-sweep # if negative, start from the end
    sweep=max(0,min(sweep,self.sweeps-1)) # correct for out of range sweeps
    # Skip reloading when this sweep is already selected (unless we are in
    # derivative mode, which forces a refresh).
    if 'sweep' in dir(self) and self.sweep == sweep and self.derivative is False:
        self.log.debug("sweep %d already set",sweep)
        return
    #self.log.debug("loading sweep %d (Ch%d)",sweep,channel)
    self.channels=self.ABFblock.segments[sweep].size["analogsignals"]
    # NOTE(review): the multichannel warning fires only for sweep 0 —
    # confirm that is intentional.
    if self.channels>1 and sweep==0:
        self.log.info("WARNING: multichannel not yet supported!") #TODO:
    self.trace = self.ABFblock.segments[sweep].analogsignals[channel]
    self.sweep=sweep # currently selected sweep
    self.channel=channel # currently selected channel

    # sweep information
    self.rate = int(self.trace.sampling_rate) # Hz
    self.period = float(1/self.rate) # seconds (inverse of sample rate)
    self.pointsPerSec = int(self.rate) # for easy access
    self.pointsPerMs = int(self.rate/1000.0) # for easy access
    self.sweepSize = len(self.trace) # number of data points per sweep
    self.sweepInterval = self.trace.duration.magnitude # sweep interval (seconds)
    self.sweepLength = float(self.trace.t_stop-self.trace.t_start) # in seconds
    self.length = self.sweepLength*self.sweeps # length (sec) of total recording
    self.lengthMinutes = self.length/60.0 # length (minutes) of total recording

    # Derive human-readable unit labels from the trace's physical units
    # (pA implies current clamp data, mV implies voltage data).
    if str(self.trace.dimensionality) == 'pA':
        self.units,self.units2="pA","clamp current (pA)"
        self.unitsD,self.unitsD2="pA/ms","current velocity (pA/ms)"
        self.protoUnits,self.protoUnits2="mV","command voltage (mV)"
    elif str(self.trace.dimensionality) == 'mV':
        self.units,self.units2="mV","membrane potential (mV)"
        self.unitsD,self.unitsD2="V/s","potential velocity (V/s)"
        self.protoUnits,self.protoUnits2="pA","command current (pA)"
    else:
        self.units,self.units2="?","unknown units"
        self.unitsD,self.unitsD2="?","unknown units"

    # sweep data
    self.sweepY = self.trace.magnitude # sweep data (mV or pA)
    self.sweepT = self.trace.times.magnitude # actual sweep times (sec)
    self.sweepStart = float(self.trace.t_start) # time start of sweep (sec)
    self.sweepX2 = self.sweepT-self.trace.t_start.magnitude # sweeps overlap
    self.sweepX = self.sweepX2+sweep*self.sweepInterval # assume no gaps
    if self.derivative:
        self.log.debug("taking derivative")
        #self.sweepD=np.diff(self.sweepY) # take derivative
        self.sweepD=self.sweepY[1:]-self.sweepY[:-1] # better?
        self.sweepD=np.insert(self.sweepD,0,self.sweepD[0]) # add a point
        self.sweepD/=(self.period*1000) # correct for sample rate
    else:
        self.sweepD=[0] # derivative is forced to be empty
    # generate the protocol too
    self.generate_protocol()
set the sweep and channel of an ABF. Both start at 0.
Below is the the instruction that describes the task: ### Input: set the sweep and channel of an ABF. Both start at 0. ### Response: def setsweep(self, sweep=0, channel=0): """set the sweep and channel of an ABF. Both start at 0.""" try: sweep=int(sweep) except: self.log.error("trying to set sweep to [%s]",sweep) return if sweep<0: sweep=self.sweeps-1-sweep # if negative, start from the end sweep=max(0,min(sweep,self.sweeps-1)) # correct for out of range sweeps if 'sweep' in dir(self) and self.sweep == sweep and self.derivative is False: self.log.debug("sweep %d already set",sweep) return #self.log.debug("loading sweep %d (Ch%d)",sweep,channel) self.channels=self.ABFblock.segments[sweep].size["analogsignals"] if self.channels>1 and sweep==0: self.log.info("WARNING: multichannel not yet supported!") #TODO: self.trace = self.ABFblock.segments[sweep].analogsignals[channel] self.sweep=sweep # currently selected sweep self.channel=channel # currently selected channel # sweep information self.rate = int(self.trace.sampling_rate) # Hz self.period = float(1/self.rate) # seconds (inverse of sample rate) self.pointsPerSec = int(self.rate) # for easy access self.pointsPerMs = int(self.rate/1000.0) # for easy access self.sweepSize = len(self.trace) # number of data points per sweep self.sweepInterval = self.trace.duration.magnitude # sweep interval (seconds) self.sweepLength = float(self.trace.t_stop-self.trace.t_start) # in seconds self.length = self.sweepLength*self.sweeps # length (sec) of total recording self.lengthMinutes = self.length/60.0 # length (minutes) of total recording if str(self.trace.dimensionality) == 'pA': self.units,self.units2="pA","clamp current (pA)" self.unitsD,self.unitsD2="pA/ms","current velocity (pA/ms)" self.protoUnits,self.protoUnits2="mV","command voltage (mV)" elif str(self.trace.dimensionality) == 'mV': self.units,self.units2="mV","membrane potential (mV)" self.unitsD,self.unitsD2="V/s","potential velocity (V/s)" 
self.protoUnits,self.protoUnits2="pA","command current (pA)" else: self.units,self.units2="?","unknown units" self.unitsD,self.unitsD2="?","unknown units" # sweep data self.sweepY = self.trace.magnitude # sweep data (mV or pA) self.sweepT = self.trace.times.magnitude # actual sweep times (sec) self.sweepStart = float(self.trace.t_start) # time start of sweep (sec) self.sweepX2 = self.sweepT-self.trace.t_start.magnitude # sweeps overlap self.sweepX = self.sweepX2+sweep*self.sweepInterval # assume no gaps if self.derivative: self.log.debug("taking derivative") #self.sweepD=np.diff(self.sweepY) # take derivative self.sweepD=self.sweepY[1:]-self.sweepY[:-1] # better? self.sweepD=np.insert(self.sweepD,0,self.sweepD[0]) # add a point self.sweepD/=(self.period*1000) # correct for sample rate else: self.sweepD=[0] # derivative is forced to be empty # generate the protocol too self.generate_protocol()
def melt(self, plot=False):
    """
    Find and merge groups of polygons in the surface that meet the
    following criteria:

        * Are coplanars.
        * Are contiguous.
        * The result is convex.

    This method is very useful at reducing the number of items and,
    therefore, the shadowing computation time. Before overriding this
    instance, it is saved and can be restored with ``.restore()``

    :param plot: If True, generates the before and after visualizations
        for the surface. Use it to check the results.
    :type plot: bool
    :returns: None

    .. warning:: This method does not check if the merged polygons are
        actually convex. The convex hull of the union is directly
        calculated. For this reason, it is very important to visually
        check the solution.
    """
    from pyny3d.utils import bool2index
    from scipy.spatial import ConvexHull

    # First, coplanarity
    ## Normalize parametric equations
    para = [poly.get_parametric() for poly in self]
    para = np.array([p/np.linalg.norm(p) for p in para])
    n = para.shape[0]

    ## Coincidences
    cop = []
    for i, plane in enumerate(para[:-1]):
        indexes = np.zeros((n-i-1, 4))
        for c in range(4):
            indexes[:, c] = np.isclose(para[i+1:, c], plane[c])
        # Rows where all four normalized coefficients match => coplanar
        # with `plane`; indices are shifted by i+1 back into `para`.
        pos = bool2index(indexes.sum(axis=1)==4)+i+1
        if pos.shape[0] > 0:
            cop.append(np.hstack((i, pos)))
            # Mark matched planes as consumed so they cannot be grouped
            # a second time (NaN never compares close to anything).
            para[pos, :] = np.nan

    # Second, contiguity
    substituted = []
    cop_cont = []
    for i, group in enumerate(cop):
        # NOTE: the comprehension reuses the name ``i``; in Python 3 the
        # comprehension variable is scoped locally so the loop variable
        # is unaffected.
        polygons = [self[i] for i in group]
        if Surface.contiguous(polygons):
            cop_cont.append(polygons)
            substituted.append(group)

    if len(substituted) != 0:
        self.save()
        if plot: self.plot()
        # NOTE(review): sum() over a list of index arrays adds them
        # elementwise rather than concatenating them; this looks correct
        # only when a single group was found — verify behavior with
        # multiple merge groups.
        substituted = sum(substituted)

        # Hull
        merged = []
        for polygons in cop_cont:
            points = np.concatenate([polygon.points for polygon in polygons])
            # Hull is computed in the XY plane only (the group is
            # assumed coplanar, so z follows from the plane equation).
            hull = ConvexHull(points[:, :2])
            merged.append(Polygon(points[hull.vertices]))

        # Final substitution
        new_surface = [self[i] for i in range(len(self.polygons))
                       if i not in substituted]
        new_surface += merged
        self.polygons = new_surface
        self.sorted_areas = None

        if plot: self.plot()
Find and merge groups of polygons in the surface that meet the following criteria: * Are coplanars. * Are contiguous. * The result is convex. This method is very useful at reducing the number the items and, therefore, the shadowing time computing. Before override this instance, it is saved and can be restored with ``.restore()`` :param plot: If True, generates the before and after visualizations for the surface. Use it to check the results. :type plot: bool :returns: None .. warning:: This method do not check if the merged polygons are actually convex. The convex hull of the union is directly calculated. For this reason, it is very important to visualy check the solution.
Below is the the instruction that describes the task: ### Input: Find and merge groups of polygons in the surface that meet the following criteria: * Are coplanars. * Are contiguous. * The result is convex. This method is very useful at reducing the number the items and, therefore, the shadowing time computing. Before override this instance, it is saved and can be restored with ``.restore()`` :param plot: If True, generates the before and after visualizations for the surface. Use it to check the results. :type plot: bool :returns: None .. warning:: This method do not check if the merged polygons are actually convex. The convex hull of the union is directly calculated. For this reason, it is very important to visualy check the solution. ### Response: def melt(self, plot=False): """ Find and merge groups of polygons in the surface that meet the following criteria: * Are coplanars. * Are contiguous. * The result is convex. This method is very useful at reducing the number the items and, therefore, the shadowing time computing. Before override this instance, it is saved and can be restored with ``.restore()`` :param plot: If True, generates the before and after visualizations for the surface. Use it to check the results. :type plot: bool :returns: None .. warning:: This method do not check if the merged polygons are actually convex. The convex hull of the union is directly calculated. For this reason, it is very important to visualy check the solution. 
""" from pyny3d.utils import bool2index from scipy.spatial import ConvexHull # First, coplanarity ## Normalize parametric equations para = [poly.get_parametric() for poly in self] para = np.array([p/np.linalg.norm(p) for p in para]) n = para.shape[0] ## Coincidences cop = [] for i, plane in enumerate(para[:-1]): indexes = np.zeros((n-i-1, 4)) for c in range(4): indexes[:, c] = np.isclose(para[i+1:, c], plane[c]) pos = bool2index(indexes.sum(axis=1)==4)+i+1 if pos.shape[0] > 0: cop.append(np.hstack((i, pos))) para[pos, :] = np.nan # Second, contiguity substituted = [] cop_cont = [] for i, group in enumerate(cop): polygons = [self[i] for i in group] if Surface.contiguous(polygons): cop_cont.append(polygons) substituted.append(group) if len(substituted) != 0: self.save() if plot: self.plot() substituted = sum(substituted) # Hull merged = [] for polygons in cop_cont: points = np.concatenate([polygon.points for polygon in polygons]) hull = ConvexHull(points[:, :2]) merged.append(Polygon(points[hull.vertices])) # Final substitution new_surface = [self[i] for i in range(len(self.polygons)) if i not in substituted] new_surface += merged self.polygons = new_surface self.sorted_areas = None if plot: self.plot()
def check_for_missed_cleanup():
    """Clean up any TaskAttempts that finished running but were never
    cleaned up.
    """
    # Nothing to do when all task data is being preserved.
    if get_setting('PRESERVE_ALL'):
        return
    from api.models.tasks import TaskAttempt
    # Attempts that are done running but still hold resources.
    stale = TaskAttempt.objects.filter(
        status_is_running=False).filter(status_is_cleaned_up=False)
    if get_setting('PRESERVE_ON_FAILURE'):
        # Keep failed attempts around for debugging.
        stale = stale.exclude(status_is_failed=True)
    for task_attempt in stale:
        task_attempt.cleanup()
Check for TaskAttempts that were never cleaned up
Below is the the instruction that describes the task: ### Input: Check for TaskAttempts that were never cleaned up ### Response: def check_for_missed_cleanup(): """Check for TaskAttempts that were never cleaned up """ if get_setting('PRESERVE_ALL'): return from api.models.tasks import TaskAttempt if get_setting('PRESERVE_ON_FAILURE'): for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter( status_is_cleaned_up=False).exclude( status_is_failed=True): task_attempt.cleanup() else: for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter(status_is_cleaned_up=False): task_attempt.cleanup()
def vel_horizontal(HeightWaterCritical):
    """Return the horizontal velocity."""
    # Reject non-physical (non-positive) critical heights up front.
    ut.check_range([HeightWaterCritical, ">0", "Critical height of water"])
    velocity_squared = gravity.magnitude * HeightWaterCritical
    return np.sqrt(velocity_squared)
Return the horizontal velocity.
Below is the the instruction that describes the task: ### Input: Return the horizontal velocity. ### Response: def vel_horizontal(HeightWaterCritical): """Return the horizontal velocity.""" #Checking input validity ut.check_range([HeightWaterCritical, ">0", "Critical height of water"]) return np.sqrt(gravity.magnitude * HeightWaterCritical)
def _dict_subset(keys, master_dict): ''' Return a dictionary of only the subset of keys/values specified in keys ''' return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys])
Return a dictionary of only the subset of keys/values specified in keys
Below is the the instruction that describes the task: ### Input: Return a dictionary of only the subset of keys/values specified in keys ### Response: def _dict_subset(keys, master_dict): ''' Return a dictionary of only the subset of keys/values specified in keys ''' return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys])
def console_list_load_xp(
    filename: str
) -> Optional[List[tcod.console.Console]]:
    """Load a REXPaint ``.xp`` file as a list of consoles.

    Returns None when libtcod fails to parse the file.
    """
    raw_list = lib.TCOD_console_list_from_xp(filename.encode("utf-8"))
    if raw_list == ffi.NULL:
        return None
    try:
        # Reverse first, so popping from the tail yields the consoles in
        # their original file order.
        lib.TCOD_list_reverse(raw_list)
        consoles = []
        while not lib.TCOD_list_is_empty(raw_list):
            cdata = lib.TCOD_list_pop(raw_list)
            consoles.append(tcod.console.Console._from_cdata(cdata))
        return consoles
    finally:
        # Always free the C-side list, even when wrapping a console fails.
        lib.TCOD_list_delete(raw_list)
Return a list of consoles from a REXPaint `.xp` file.
Below is the the instruction that describes the task: ### Input: Return a list of consoles from a REXPaint `.xp` file. ### Response: def console_list_load_xp( filename: str ) -> Optional[List[tcod.console.Console]]: """Return a list of consoles from a REXPaint `.xp` file.""" tcod_list = lib.TCOD_console_list_from_xp(filename.encode("utf-8")) if tcod_list == ffi.NULL: return None try: python_list = [] lib.TCOD_list_reverse(tcod_list) while not lib.TCOD_list_is_empty(tcod_list): python_list.append( tcod.console.Console._from_cdata(lib.TCOD_list_pop(tcod_list)) ) return python_list finally: lib.TCOD_list_delete(tcod_list)
def write(self, text: str): """ Prints text to the screen. Supports colors by using the color constants. To use colors, add the color before the text you want to print. :param text: The text to print. """ # Default color is NORMAL. last_color = (self._DARK_CODE, 0) # We use splitlines with keepends in order to keep the line breaks. # Then we split by using the console width. original_lines = text.splitlines(True) lines = self._split_lines(original_lines) if self._width_limit else original_lines # Print the new width-formatted lines. for line in lines: # Print indents only at line beginnings. if not self._in_line: self._writer.write(' ' * self.indents_sum) # Remove colors if needed. if not self._colors: for color_code in self._ANSI_REGEXP.findall(line): line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '') elif not self._ANSI_REGEXP.match(line): # Check if the line starts with a color. If not, we apply the color from the last line. line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line # Print the final line. self._writer.write(line) # Update the in_line status. self._in_line = not line.endswith(self.LINE_SEP) # Update the last color used. if self._colors: last_color = self._ANSI_REGEXP.findall(line)[-1] # Update last position (if there was no line break in the end). if len(lines) > 0: last_line = lines[-1] if not last_line.endswith(self.LINE_SEP): # Strip the colors to figure out the real number of characters in the line. if self._colors: for color_code in self._ANSI_REGEXP.findall(last_line): last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '') self._last_position += len(last_line) else: self._last_position = 0 self._is_first_line = False else: self._last_position = 0 # Reset colors for the next print. if self._colors and not text.endswith(self.NORMAL): self._writer.write(self.NORMAL)
Prints text to the screen. Supports colors by using the color constants. To use colors, add the color before the text you want to print. :param text: The text to print.
Below is the the instruction that describes the task: ### Input: Prints text to the screen. Supports colors by using the color constants. To use colors, add the color before the text you want to print. :param text: The text to print. ### Response: def write(self, text: str): """ Prints text to the screen. Supports colors by using the color constants. To use colors, add the color before the text you want to print. :param text: The text to print. """ # Default color is NORMAL. last_color = (self._DARK_CODE, 0) # We use splitlines with keepends in order to keep the line breaks. # Then we split by using the console width. original_lines = text.splitlines(True) lines = self._split_lines(original_lines) if self._width_limit else original_lines # Print the new width-formatted lines. for line in lines: # Print indents only at line beginnings. if not self._in_line: self._writer.write(' ' * self.indents_sum) # Remove colors if needed. if not self._colors: for color_code in self._ANSI_REGEXP.findall(line): line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '') elif not self._ANSI_REGEXP.match(line): # Check if the line starts with a color. If not, we apply the color from the last line. line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line # Print the final line. self._writer.write(line) # Update the in_line status. self._in_line = not line.endswith(self.LINE_SEP) # Update the last color used. if self._colors: last_color = self._ANSI_REGEXP.findall(line)[-1] # Update last position (if there was no line break in the end). if len(lines) > 0: last_line = lines[-1] if not last_line.endswith(self.LINE_SEP): # Strip the colors to figure out the real number of characters in the line. 
if self._colors: for color_code in self._ANSI_REGEXP.findall(last_line): last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '') self._last_position += len(last_line) else: self._last_position = 0 self._is_first_line = False else: self._last_position = 0 # Reset colors for the next print. if self._colors and not text.endswith(self.NORMAL): self._writer.write(self.NORMAL)
def query(self, expression, vm='python'):
    """Select the rows of the table matching a boolean expression.

    Parameters
    ----------
    expression : string
        Expression to evaluate.
    vm : {'numexpr', 'python'}
        Virtual machine to use.

    Returns
    -------
    result : structured array
    """
    # Evaluate the expression into a boolean mask, then filter rows by it.
    return self.compress(self.eval(expression, vm=vm))
Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array
Below is the the instruction that describes the task: ### Input: Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array ### Response: def query(self, expression, vm='python'): """Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array """ condition = self.eval(expression, vm=vm) return self.compress(condition)
def distance_to_edge(self, skydir):
    """Return the angular distance between the given direction and the
    edge of the projection.

    Negative values mean the direction lies inside the map. For
    directions outside both edges the larger axis offset is used, which
    approximates (underestimates) the true corner distance.
    """
    xpix, ypix = skydir.to_pixel(self.wcs, origin=0)
    # Signed offsets from the map center, converted from pixels to the
    # projection's angular units via the per-axis pixel size.
    deltax = np.array((xpix - self._pix_center[0]) * self._pix_size[0], ndmin=1)
    deltay = np.array((ypix - self._pix_center[1]) * self._pix_size[1], ndmin=1)

    # Distance beyond the half-width along each axis: negative inside
    # the map, positive outside.
    deltax = np.abs(deltax) - 0.5 * self._width[0]
    deltay = np.abs(deltay) - 0.5 * self._width[1]

    # Quadrant masks relative to the map edges:
    m0 = (deltax < 0) & (deltay < 0)   # inside both edges
    m1 = (deltax > 0) & (deltay < 0)   # outside in x only
    m2 = (deltax < 0) & (deltay > 0)   # outside in y only
    m3 = (deltax > 0) & (deltay > 0)   # outside both (corner region)
    # Which axis has the smaller absolute offset (nearest edge inside;
    # ties go to x via <=).
    mx = np.abs(deltax) <= np.abs(deltay)
    my = np.abs(deltay) < np.abs(deltax)

    # NOTE(review): points with deltax == 0 or deltay == 0 exactly fall
    # into none of the masks and keep delta = 0 — confirm that boundary
    # behavior is intended.
    delta = np.zeros(len(deltax))
    # Inside: nearest edge (smaller |delta|); corner: larger component;
    # single-axis overshoot: that axis's component.
    delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1]
    delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2]

    return delta
Return the angular distance from the given direction and the edge of the projection.
Below is the the instruction that describes the task: ### Input: Return the angular distance from the given direction and the edge of the projection. ### Response: def distance_to_edge(self, skydir): """Return the angular distance from the given direction and the edge of the projection.""" xpix, ypix = skydir.to_pixel(self.wcs, origin=0) deltax = np.array((xpix - self._pix_center[0]) * self._pix_size[0], ndmin=1) deltay = np.array((ypix - self._pix_center[1]) * self._pix_size[1], ndmin=1) deltax = np.abs(deltax) - 0.5 * self._width[0] deltay = np.abs(deltay) - 0.5 * self._width[1] m0 = (deltax < 0) & (deltay < 0) m1 = (deltax > 0) & (deltay < 0) m2 = (deltax < 0) & (deltay > 0) m3 = (deltax > 0) & (deltay > 0) mx = np.abs(deltax) <= np.abs(deltay) my = np.abs(deltay) < np.abs(deltax) delta = np.zeros(len(deltax)) delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1] delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2] return delta
def setValues(self, values): """ Assign the values (string or float) to the parameter instances with the specified indices, equivalent to the AMPL code: .. code-block:: ampl let {i in indices} par[i] := values[i]; Args: values: list, dictionary or :class:`~amplpy.DataFrame` with the indices and the values to be set. Raises: TypeError: If called on a scalar parameter. """ if isinstance(values, dict): indices, values = list(zip(*values.items())) indices = Utils.toTupleArray(indices) if any(isinstance(value, basestring) for value in values): values = list(map(str, values)) self._impl.setValuesTaStr(indices, values, len(values)) elif all(isinstance(value, Real) for value in values): values = list(map(float, values)) self._impl.setValuesTaDbl(indices, values, len(values)) else: raise TypeError elif isinstance(values, (list, tuple)): if any(isinstance(value, basestring) for value in values): values = list(map(str, values)) self._impl.setValuesStr(values, len(values)) elif all(isinstance(value, Real) for value in values): values = list(map(float, values)) self._impl.setValuesDbl(values, len(values)) else: raise TypeError else: if np is not None and isinstance(values, np.ndarray): self.setValues(DataFrame.fromNumpy(values).toList()) return Entity.setValues(self, values)
Assign the values (string or float) to the parameter instances with the specified indices, equivalent to the AMPL code: .. code-block:: ampl let {i in indices} par[i] := values[i]; Args: values: list, dictionary or :class:`~amplpy.DataFrame` with the indices and the values to be set. Raises: TypeError: If called on a scalar parameter.
Below is the the instruction that describes the task: ### Input: Assign the values (string or float) to the parameter instances with the specified indices, equivalent to the AMPL code: .. code-block:: ampl let {i in indices} par[i] := values[i]; Args: values: list, dictionary or :class:`~amplpy.DataFrame` with the indices and the values to be set. Raises: TypeError: If called on a scalar parameter. ### Response: def setValues(self, values): """ Assign the values (string or float) to the parameter instances with the specified indices, equivalent to the AMPL code: .. code-block:: ampl let {i in indices} par[i] := values[i]; Args: values: list, dictionary or :class:`~amplpy.DataFrame` with the indices and the values to be set. Raises: TypeError: If called on a scalar parameter. """ if isinstance(values, dict): indices, values = list(zip(*values.items())) indices = Utils.toTupleArray(indices) if any(isinstance(value, basestring) for value in values): values = list(map(str, values)) self._impl.setValuesTaStr(indices, values, len(values)) elif all(isinstance(value, Real) for value in values): values = list(map(float, values)) self._impl.setValuesTaDbl(indices, values, len(values)) else: raise TypeError elif isinstance(values, (list, tuple)): if any(isinstance(value, basestring) for value in values): values = list(map(str, values)) self._impl.setValuesStr(values, len(values)) elif all(isinstance(value, Real) for value in values): values = list(map(float, values)) self._impl.setValuesDbl(values, len(values)) else: raise TypeError else: if np is not None and isinstance(values, np.ndarray): self.setValues(DataFrame.fromNumpy(values).toList()) return Entity.setValues(self, values)
def update_all(self, *args, **kwargs): """Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). """ updated_item_count = 0 repository = repo_factory.get_repository(self._entity_cls) try: updated_item_count = repository.update_all(self._criteria, *args, **kwargs) except Exception: # FIXME Log Exception raise return updated_item_count
Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value).
Below is the the instruction that describes the task: ### Input: Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). ### Response: def update_all(self, *args, **kwargs): """Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). """ updated_item_count = 0 repository = repo_factory.get_repository(self._entity_cls) try: updated_item_count = repository.update_all(self._criteria, *args, **kwargs) except Exception: # FIXME Log Exception raise return updated_item_count
def train(data_train, model, nsp_loss, mlm_loss, vocab_size, ctx): """Training function.""" hvd.broadcast_parameters(model.collect_params(), root_rank=0) mlm_metric = nlp.metric.MaskedAccuracy() nsp_metric = nlp.metric.MaskedAccuracy() mlm_metric.reset() nsp_metric.reset() logging.debug('Creating distributed trainer...') lr = args.lr optim_params = {'learning_rate': lr, 'epsilon': 1e-6, 'wd': 0.01} if args.dtype == 'float16': optim_params['multi_precision'] = True dynamic_loss_scale = args.dtype == 'float16' if dynamic_loss_scale: loss_scale_param = {'scale_window': 2000 / num_workers} else: loss_scale_param = None trainer = hvd.DistributedTrainer(model.collect_params(), 'bertadam', optim_params) fp16_trainer = FP16Trainer(trainer, dynamic_loss_scale=dynamic_loss_scale, loss_scaler_params=loss_scale_param) if args.ckpt_dir and args.start_step: trainer.load_states(os.path.join(args.ckpt_dir, '%07d.states'%args.start_step)) accumulate = args.accumulate num_train_steps = args.num_steps warmup_ratio = args.warmup_ratio num_warmup_steps = int(num_train_steps * warmup_ratio) params = [p for p in model.collect_params().values() if p.grad_req != 'null'] param_dict = model.collect_params() # Do not apply weight decay on LayerNorm and bias terms for _, v in model.collect_params('.*beta|.*gamma|.*bias').items(): v.wd_mult = 0.0 if accumulate > 1: for p in params: p.grad_req = 'add' train_begin_time = time.time() begin_time = time.time() running_mlm_loss, running_nsp_loss = 0, 0 running_num_tks = 0 batch_num = 0 step_num = args.start_step logging.debug('Training started') while step_num < num_train_steps: for _, dataloader in enumerate(data_train): if step_num >= num_train_steps: break # create dummy data loader if needed if args.dummy_data_len: target_shape = (args.batch_size, args.dummy_data_len) dataloader = get_dummy_dataloader(dataloader, target_shape) for _, data_batch in enumerate(dataloader): if step_num >= num_train_steps: break if batch_num % accumulate == 0: 
step_num += 1 # if accumulate > 1, grad_req is set to 'add', and zero_grad is required if accumulate > 1: param_dict.zero_grad() # update learning rate if step_num <= num_warmup_steps: new_lr = lr * step_num / num_warmup_steps else: offset = lr * step_num / num_train_steps new_lr = lr - offset trainer.set_learning_rate(new_lr) if args.profile: profile(step_num, 10, 14, profile_name=args.profile + str(rank)) # load data if args.use_avg_len: data_list = [[seq.as_in_context(context) for seq in shard] for context, shard in zip([ctx], data_batch)] else: data_list = list(split_and_load(data_batch, [ctx])) data = data_list[0] # forward with mx.autograd.record(): (ls, ns_label, classified, masked_id, decoded, \ masked_weight, ls1, ls2, valid_len) = forward(data, model, mlm_loss, nsp_loss, vocab_size, args.dtype) ls = ls / accumulate # backward if args.dtype == 'float16': fp16_trainer.backward(ls) else: ls.backward() running_mlm_loss += ls1.as_in_context(mx.cpu()) running_nsp_loss += ls2.as_in_context(mx.cpu()) running_num_tks += valid_len.sum().as_in_context(mx.cpu()) # update if (batch_num + 1) % accumulate == 0: # step() performs 3 things: # 1. allreduce gradients from all workers # 2. checking the global_norm of gradients and clip them if necessary # 3. 
averaging the gradients and apply updates fp16_trainer.step(1, max_norm=1*num_workers) nsp_metric.update([ns_label], [classified]) mlm_metric.update([masked_id], [decoded], [masked_weight]) # logging if (step_num + 1) % (args.log_interval) == 0 and (batch_num + 1) % accumulate == 0: log(begin_time, running_num_tks, running_mlm_loss / accumulate, running_nsp_loss / accumulate, step_num, mlm_metric, nsp_metric, trainer, args.log_interval) begin_time = time.time() running_mlm_loss = running_nsp_loss = running_num_tks = 0 mlm_metric.reset_local() nsp_metric.reset_local() # saving checkpoints if args.ckpt_dir and (step_num + 1) % (args.ckpt_interval) == 0 \ and (batch_num + 1) % accumulate == 0 and local_rank == 0: save_params(step_num, model, trainer, args.ckpt_dir) batch_num += 1 if local_rank == 0: save_params(step_num, model, trainer, args.ckpt_dir) mx.nd.waitall() train_end_time = time.time() logging.info('Train cost={:.1f}s'.format(train_end_time - train_begin_time))
Training function.
Below is the the instruction that describes the task: ### Input: Training function. ### Response: def train(data_train, model, nsp_loss, mlm_loss, vocab_size, ctx): """Training function.""" hvd.broadcast_parameters(model.collect_params(), root_rank=0) mlm_metric = nlp.metric.MaskedAccuracy() nsp_metric = nlp.metric.MaskedAccuracy() mlm_metric.reset() nsp_metric.reset() logging.debug('Creating distributed trainer...') lr = args.lr optim_params = {'learning_rate': lr, 'epsilon': 1e-6, 'wd': 0.01} if args.dtype == 'float16': optim_params['multi_precision'] = True dynamic_loss_scale = args.dtype == 'float16' if dynamic_loss_scale: loss_scale_param = {'scale_window': 2000 / num_workers} else: loss_scale_param = None trainer = hvd.DistributedTrainer(model.collect_params(), 'bertadam', optim_params) fp16_trainer = FP16Trainer(trainer, dynamic_loss_scale=dynamic_loss_scale, loss_scaler_params=loss_scale_param) if args.ckpt_dir and args.start_step: trainer.load_states(os.path.join(args.ckpt_dir, '%07d.states'%args.start_step)) accumulate = args.accumulate num_train_steps = args.num_steps warmup_ratio = args.warmup_ratio num_warmup_steps = int(num_train_steps * warmup_ratio) params = [p for p in model.collect_params().values() if p.grad_req != 'null'] param_dict = model.collect_params() # Do not apply weight decay on LayerNorm and bias terms for _, v in model.collect_params('.*beta|.*gamma|.*bias').items(): v.wd_mult = 0.0 if accumulate > 1: for p in params: p.grad_req = 'add' train_begin_time = time.time() begin_time = time.time() running_mlm_loss, running_nsp_loss = 0, 0 running_num_tks = 0 batch_num = 0 step_num = args.start_step logging.debug('Training started') while step_num < num_train_steps: for _, dataloader in enumerate(data_train): if step_num >= num_train_steps: break # create dummy data loader if needed if args.dummy_data_len: target_shape = (args.batch_size, args.dummy_data_len) dataloader = get_dummy_dataloader(dataloader, target_shape) for _, data_batch in 
enumerate(dataloader): if step_num >= num_train_steps: break if batch_num % accumulate == 0: step_num += 1 # if accumulate > 1, grad_req is set to 'add', and zero_grad is required if accumulate > 1: param_dict.zero_grad() # update learning rate if step_num <= num_warmup_steps: new_lr = lr * step_num / num_warmup_steps else: offset = lr * step_num / num_train_steps new_lr = lr - offset trainer.set_learning_rate(new_lr) if args.profile: profile(step_num, 10, 14, profile_name=args.profile + str(rank)) # load data if args.use_avg_len: data_list = [[seq.as_in_context(context) for seq in shard] for context, shard in zip([ctx], data_batch)] else: data_list = list(split_and_load(data_batch, [ctx])) data = data_list[0] # forward with mx.autograd.record(): (ls, ns_label, classified, masked_id, decoded, \ masked_weight, ls1, ls2, valid_len) = forward(data, model, mlm_loss, nsp_loss, vocab_size, args.dtype) ls = ls / accumulate # backward if args.dtype == 'float16': fp16_trainer.backward(ls) else: ls.backward() running_mlm_loss += ls1.as_in_context(mx.cpu()) running_nsp_loss += ls2.as_in_context(mx.cpu()) running_num_tks += valid_len.sum().as_in_context(mx.cpu()) # update if (batch_num + 1) % accumulate == 0: # step() performs 3 things: # 1. allreduce gradients from all workers # 2. checking the global_norm of gradients and clip them if necessary # 3. 
averaging the gradients and apply updates fp16_trainer.step(1, max_norm=1*num_workers) nsp_metric.update([ns_label], [classified]) mlm_metric.update([masked_id], [decoded], [masked_weight]) # logging if (step_num + 1) % (args.log_interval) == 0 and (batch_num + 1) % accumulate == 0: log(begin_time, running_num_tks, running_mlm_loss / accumulate, running_nsp_loss / accumulate, step_num, mlm_metric, nsp_metric, trainer, args.log_interval) begin_time = time.time() running_mlm_loss = running_nsp_loss = running_num_tks = 0 mlm_metric.reset_local() nsp_metric.reset_local() # saving checkpoints if args.ckpt_dir and (step_num + 1) % (args.ckpt_interval) == 0 \ and (batch_num + 1) % accumulate == 0 and local_rank == 0: save_params(step_num, model, trainer, args.ckpt_dir) batch_num += 1 if local_rank == 0: save_params(step_num, model, trainer, args.ckpt_dir) mx.nd.waitall() train_end_time = time.time() logging.info('Train cost={:.1f}s'.format(train_end_time - train_begin_time))
def getmacbyip(ip, chainCC=0): """Return MAC address corresponding to a given IP address""" if isinstance(ip, Net): ip = next(iter(ip)) ip = inet_ntoa(inet_aton(ip or "0.0.0.0")) tmp = [orb(e) for e in inet_aton(ip)] if (tmp[0] & 0xf0) == 0xe0: # mcast @ return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1] & 0x7f, tmp[2], tmp[3]) iff, _, gw = conf.route.route(ip) if ((iff == consts.LOOPBACK_INTERFACE) or (ip == conf.route.get_if_bcast(iff))): # noqa: E501 return "ff:ff:ff:ff:ff:ff" if gw != "0.0.0.0": ip = gw mac = conf.netcache.arp_cache.get(ip) if mac: return mac try: res = srp1(Ether(dst=ETHER_BROADCAST) / ARP(op="who-has", pdst=ip), type=ETH_P_ARP, iface=iff, timeout=2, verbose=0, chainCC=chainCC, nofilter=1) except Exception: return None if res is not None: mac = res.payload.hwsrc conf.netcache.arp_cache[ip] = mac return mac return None
Return MAC address corresponding to a given IP address
Below is the the instruction that describes the task: ### Input: Return MAC address corresponding to a given IP address ### Response: def getmacbyip(ip, chainCC=0): """Return MAC address corresponding to a given IP address""" if isinstance(ip, Net): ip = next(iter(ip)) ip = inet_ntoa(inet_aton(ip or "0.0.0.0")) tmp = [orb(e) for e in inet_aton(ip)] if (tmp[0] & 0xf0) == 0xe0: # mcast @ return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1] & 0x7f, tmp[2], tmp[3]) iff, _, gw = conf.route.route(ip) if ((iff == consts.LOOPBACK_INTERFACE) or (ip == conf.route.get_if_bcast(iff))): # noqa: E501 return "ff:ff:ff:ff:ff:ff" if gw != "0.0.0.0": ip = gw mac = conf.netcache.arp_cache.get(ip) if mac: return mac try: res = srp1(Ether(dst=ETHER_BROADCAST) / ARP(op="who-has", pdst=ip), type=ETH_P_ARP, iface=iff, timeout=2, verbose=0, chainCC=chainCC, nofilter=1) except Exception: return None if res is not None: mac = res.payload.hwsrc conf.netcache.arp_cache[ip] = mac return mac return None
def get_name_history(name, hostport=None, proxy=None, history_page=None): """ Get the full history of a name Returns {'status': True, 'history': ...} on success, where history is grouped by block Returns {'error': ...} on error """ assert hostport or proxy, 'Need hostport or proxy' if proxy is None: proxy = connect_hostport(hostport) hist = {} indexing = None lastblock = None if history_page != None: resp = get_name_history_page(name, history_page, proxy=proxy) if 'error' in resp: return resp indexing = resp['indexing'] lastblock = resp['lastblock'] return {'status': True, 'history': resp['history'], 'indexing': indexing, 'lastblock': lastblock} for i in range(0, 100000000): # this is obviously too big resp = get_name_history_page(name, i, proxy=proxy) if 'error' in resp: return resp indexing = resp['indexing'] lastblock = resp['lastblock'] if len(resp['history']) == 0: # caught up break hist = name_history_merge(hist, resp['history']) return {'status': True, 'history': hist, 'indexing': indexing, 'lastblock': lastblock}
Get the full history of a name Returns {'status': True, 'history': ...} on success, where history is grouped by block Returns {'error': ...} on error
Below is the the instruction that describes the task: ### Input: Get the full history of a name Returns {'status': True, 'history': ...} on success, where history is grouped by block Returns {'error': ...} on error ### Response: def get_name_history(name, hostport=None, proxy=None, history_page=None): """ Get the full history of a name Returns {'status': True, 'history': ...} on success, where history is grouped by block Returns {'error': ...} on error """ assert hostport or proxy, 'Need hostport or proxy' if proxy is None: proxy = connect_hostport(hostport) hist = {} indexing = None lastblock = None if history_page != None: resp = get_name_history_page(name, history_page, proxy=proxy) if 'error' in resp: return resp indexing = resp['indexing'] lastblock = resp['lastblock'] return {'status': True, 'history': resp['history'], 'indexing': indexing, 'lastblock': lastblock} for i in range(0, 100000000): # this is obviously too big resp = get_name_history_page(name, i, proxy=proxy) if 'error' in resp: return resp indexing = resp['indexing'] lastblock = resp['lastblock'] if len(resp['history']) == 0: # caught up break hist = name_history_merge(hist, resp['history']) return {'status': True, 'history': hist, 'indexing': indexing, 'lastblock': lastblock}
def query(self, search_model: QueryModel): """Query elasticsearch for objects. :param search_model: object of QueryModel. :return: list of objects that match the query. """ query_parsed = query_parser(search_model.query) self.logger.debug(f'elasticsearch::query::{query_parsed[0]}') if search_model.sort is not None: self._mapping_to_sort(search_model.sort.keys()) sort = self._sort_object(search_model.sort) else: sort = [{"_id": "asc"}] if search_model.query == {}: query = {'match_all': {}} else: query = query_parsed[0] body = { 'query': query, 'sort': sort, 'from': (search_model.page - 1) * search_model.offset, 'size': search_model.offset, } page = self.driver._es.search( index=self.driver._index, doc_type='_doc', body=body, q=query_parsed[1] ) object_list = [] for x in page['hits']['hits']: object_list.append(x['_source']) return object_list
Query elasticsearch for objects. :param search_model: object of QueryModel. :return: list of objects that match the query.
Below is the the instruction that describes the task: ### Input: Query elasticsearch for objects. :param search_model: object of QueryModel. :return: list of objects that match the query. ### Response: def query(self, search_model: QueryModel): """Query elasticsearch for objects. :param search_model: object of QueryModel. :return: list of objects that match the query. """ query_parsed = query_parser(search_model.query) self.logger.debug(f'elasticsearch::query::{query_parsed[0]}') if search_model.sort is not None: self._mapping_to_sort(search_model.sort.keys()) sort = self._sort_object(search_model.sort) else: sort = [{"_id": "asc"}] if search_model.query == {}: query = {'match_all': {}} else: query = query_parsed[0] body = { 'query': query, 'sort': sort, 'from': (search_model.page - 1) * search_model.offset, 'size': search_model.offset, } page = self.driver._es.search( index=self.driver._index, doc_type='_doc', body=body, q=query_parsed[1] ) object_list = [] for x in page['hits']['hits']: object_list.append(x['_source']) return object_list
def supports_calendar_type(self, calendar_type): """Tests if the given calendar type is supported. arg: calendar_type (osid.type.Type): a calendar Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``DATETIME`` or ``DURATION`` raise: NullArgument - ``calendar_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.Metadata.supports_coordinate_type if self._kwargs['syntax'] not in ['``DATETIME``', '``DURATION``']: raise errors.IllegalState() return calendar_type in self.get_calendar_types
Tests if the given calendar type is supported. arg: calendar_type (osid.type.Type): a calendar Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``DATETIME`` or ``DURATION`` raise: NullArgument - ``calendar_type`` is ``null`` *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Tests if the given calendar type is supported. arg: calendar_type (osid.type.Type): a calendar Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``DATETIME`` or ``DURATION`` raise: NullArgument - ``calendar_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* ### Response: def supports_calendar_type(self, calendar_type): """Tests if the given calendar type is supported. arg: calendar_type (osid.type.Type): a calendar Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``DATETIME`` or ``DURATION`` raise: NullArgument - ``calendar_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.Metadata.supports_coordinate_type if self._kwargs['syntax'] not in ['``DATETIME``', '``DURATION``']: raise errors.IllegalState() return calendar_type in self.get_calendar_types
def processClubAttendance(f, clubs): """Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process. """ try: # Skip as many empty lines as necessary (file format inconsistent) line = f.next() while line == ',,,,,,,,,,,,,,,,,,,\n': line = f.next() # The first non-empty line should have the name as the first field name = line.split(',')[0] # Create a new club object if needed if name not in clubs: clubs[name] = Club(name) # Get the named club c = clubs[name] c.processAttendance(f) return True except StopIteration: return False
Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process.
Below is the the instruction that describes the task: ### Input: Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process. ### Response: def processClubAttendance(f, clubs): """Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process. """ try: # Skip as many empty lines as necessary (file format inconsistent) line = f.next() while line == ',,,,,,,,,,,,,,,,,,,\n': line = f.next() # The first non-empty line should have the name as the first field name = line.split(',')[0] # Create a new club object if needed if name not in clubs: clubs[name] = Club(name) # Get the named club c = clubs[name] c.processAttendance(f) return True except StopIteration: return False
def supports(cls, template_file=None): """ :return: Whether the engine can process given template file or not. """ if anytemplate.compat.IS_PYTHON_3: cls._priority = 99 return False # Always as it's not ported to python 3. return super(Engine, cls).supports(template_file=template_file)
:return: Whether the engine can process given template file or not.
Below is the the instruction that describes the task: ### Input: :return: Whether the engine can process given template file or not. ### Response: def supports(cls, template_file=None): """ :return: Whether the engine can process given template file or not. """ if anytemplate.compat.IS_PYTHON_3: cls._priority = 99 return False # Always as it's not ported to python 3. return super(Engine, cls).supports(template_file=template_file)
def facetrecordtrees(table, key, start='start', stop='stop'): """ Construct faceted interval trees for the given table, where each node in the tree is a record. """ import intervaltree getstart = attrgetter(start) getstop = attrgetter(stop) getkey = attrgetter(key) trees = dict() for rec in records(table): k = getkey(rec) if k not in trees: trees[k] = intervaltree.IntervalTree() trees[k].addi(getstart(rec), getstop(rec), rec) return trees
Construct faceted interval trees for the given table, where each node in the tree is a record.
Below is the the instruction that describes the task: ### Input: Construct faceted interval trees for the given table, where each node in the tree is a record. ### Response: def facetrecordtrees(table, key, start='start', stop='stop'): """ Construct faceted interval trees for the given table, where each node in the tree is a record. """ import intervaltree getstart = attrgetter(start) getstop = attrgetter(stop) getkey = attrgetter(key) trees = dict() for rec in records(table): k = getkey(rec) if k not in trees: trees[k] = intervaltree.IntervalTree() trees[k].addi(getstart(rec), getstop(rec), rec) return trees
def halt(self): """ halt: None -> None If this instance has a separate thread running, it will be halted. This method will wait until the thread has cleaned up before returning. """ if self._callback: self._thread_continue = False self._thread.join()
halt: None -> None If this instance has a separate thread running, it will be halted. This method will wait until the thread has cleaned up before returning.
Below is the the instruction that describes the task: ### Input: halt: None -> None If this instance has a separate thread running, it will be halted. This method will wait until the thread has cleaned up before returning. ### Response: def halt(self): """ halt: None -> None If this instance has a separate thread running, it will be halted. This method will wait until the thread has cleaned up before returning. """ if self._callback: self._thread_continue = False self._thread.join()
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext """ if self._context is None: self._context = AvailablePhoneNumberCountryContext( self._version, account_sid=self._solution['account_sid'], country_code=self._solution['country_code'], ) return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext
Below is the the instruction that describes the task: ### Input: Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext ### Response: def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext """ if self._context is None: self._context = AvailablePhoneNumberCountryContext( self._version, account_sid=self._solution['account_sid'], country_code=self._solution['country_code'], ) return self._context
def copy(self, remote_path_from, remote_path_to): """Copies resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_COPY :param remote_path_from: the path to resource which will be copied, :param remote_path_to: the path where resource will be copied. """ urn_from = Urn(remote_path_from) if not self.check(urn_from.path()): raise RemoteResourceNotFound(urn_from.path()) urn_to = Urn(remote_path_to) if not self.check(urn_to.parent()): raise RemoteParentNotFound(urn_to.path()) header_destination = f'Destination: {self.get_full_path(urn_to)}' self.execute_request(action='copy', path=urn_from.quote(), headers_ext=[header_destination])
Copies resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_COPY :param remote_path_from: the path to resource which will be copied, :param remote_path_to: the path where resource will be copied.
Below is the the instruction that describes the task: ### Input: Copies resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_COPY :param remote_path_from: the path to resource which will be copied, :param remote_path_to: the path where resource will be copied. ### Response: def copy(self, remote_path_from, remote_path_to): """Copies resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_COPY :param remote_path_from: the path to resource which will be copied, :param remote_path_to: the path where resource will be copied. """ urn_from = Urn(remote_path_from) if not self.check(urn_from.path()): raise RemoteResourceNotFound(urn_from.path()) urn_to = Urn(remote_path_to) if not self.check(urn_to.parent()): raise RemoteParentNotFound(urn_to.path()) header_destination = f'Destination: {self.get_full_path(urn_to)}' self.execute_request(action='copy', path=urn_from.quote(), headers_ext=[header_destination])
def save(self):
    """Generate a random username before falling back to parent signup form"""
    taken = True
    while taken:
        # 5-hex-digit candidate derived from a random float; retried until
        # no existing user has that username (case-insensitive).
        candidate = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
        try:
            get_user_model().objects.get(username__iexact=candidate)
        except get_user_model().DoesNotExist:
            taken = False
    self.cleaned_data['username'] = candidate
    return super(SignupFormOnlyEmail, self).save()
Generate a random username before falling back to parent signup form
Below is the the instruction that describes the task: ### Input: Generate a random username before falling back to parent signup form ### Response: def save(self): """ Generate a random username before falling back to parent signup form """ while True: username = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5] try: get_user_model().objects.get(username__iexact=username) except get_user_model().DoesNotExist: break self.cleaned_data['username'] = username return super(SignupFormOnlyEmail, self).save()
def get_and_update_setting(self, name, default=None):
    '''Resolve a setting and refresh the client-secrets cache with it.

    Resolution order:
      1. The .sregistry settings file acts as a cache for the variable.
      2. An environment variable always takes priority over the cache and,
         when found, updates it (handled inside ``self._get_setting``).
      3. If neither is found, fall back to ``default`` (``None`` by default).

    A return of ``None`` therefore means "not set anywhere", and the caller
    should act accordingly.
    '''
    value = self._get_setting(name)
    if value is None and default is not None:
        value = default
    # Persist whatever we resolved back into the client secrets cache.
    if value is not None:
        update_client_secrets(backend=self.client_name, updates={name: value})
    return value
Look for a setting in the environment (first priority) and then the settings file (second). If something is found, the settings file is updated. The order of operations works as follows: 1. The .sregistry settings file is used as a cache for the variable 2. the environment variable always takes priority to cache, and if found, will update the cache. 3. If the variable is not found and the cache is set, we are good 5. If the variable is not found and the cache isn't set, return default (default is None) So the user of the function can assume a return of None equates to not set anywhere, and take the appropriate action.
Below is the the instruction that describes the task: ### Input: Look for a setting in the environment (first priority) and then the settings file (second). If something is found, the settings file is updated. The order of operations works as follows: 1. The .sregistry settings file is used as a cache for the variable 2. the environment variable always takes priority to cache, and if found, will update the cache. 3. If the variable is not found and the cache is set, we are good 5. If the variable is not found and the cache isn't set, return default (default is None) So the user of the function can assume a return of None equates to not set anywhere, and take the appropriate action. ### Response: def get_and_update_setting(self, name, default=None): '''Look for a setting in the environment (first priority) and then the settings file (second). If something is found, the settings file is updated. The order of operations works as follows: 1. The .sregistry settings file is used as a cache for the variable 2. the environment variable always takes priority to cache, and if found, will update the cache. 3. If the variable is not found and the cache is set, we are good 5. If the variable is not found and the cache isn't set, return default (default is None) So the user of the function can assume a return of None equates to not set anywhere, and take the appropriate action. ''' setting = self._get_setting(name) if setting is None and default is not None: setting = default # If the setting is found, update the client secrets if setting is not None: updates = {name : setting} update_client_secrets(backend=self.client_name, updates=updates) return setting
def zcount(self, key, min=float('-inf'), max=float('inf'), *, exclude=None):
    """Count the members in a sorted set with scores within the given values.

    :raises TypeError: min or max is not float or int
    :raises ValueError: if min greater than max
    """
    # Validate both bounds with identical messages, min first (as before).
    for label, bound in (('min', min), ('max', max)):
        if not isinstance(bound, (int, float)):
            raise TypeError('%s argument must be int or float' % label)
    if min > max:
        raise ValueError("min could not be greater than max")
    return self.execute(b'ZCOUNT', key, *_encode_min_max(exclude, min, max))
Count the members in a sorted set with scores within the given values. :raises TypeError: min or max is not float or int :raises ValueError: if min greater than max
Below is the the instruction that describes the task: ### Input: Count the members in a sorted set with scores within the given values. :raises TypeError: min or max is not float or int :raises ValueError: if min greater than max ### Response: def zcount(self, key, min=float('-inf'), max=float('inf'), *, exclude=None): """Count the members in a sorted set with scores within the given values. :raises TypeError: min or max is not float or int :raises ValueError: if min greater than max """ if not isinstance(min, (int, float)): raise TypeError("min argument must be int or float") if not isinstance(max, (int, float)): raise TypeError("max argument must be int or float") if min > max: raise ValueError("min could not be greater than max") return self.execute(b'ZCOUNT', key, *_encode_min_max(exclude, min, max))
def _model_to_dict(obj):
    """
    Convert object model to dict.

    Args:
        obj: Object model.

    Returns:
        dict: Converted model.
    """
    converted = _properties_model_to_dict(obj.properties)
    # Optional attributes are copied over only when present and truthy;
    # getattr's default covers the missing-attribute case.
    for field in ('metadata', 'snapshot'):
        value = getattr(obj, field, None)
        if value:
            converted[field] = value
    return converted
Convert object model to dict. Args: obj: Object model. Returns: dict: Converted model.
Below is the the instruction that describes the task: ### Input: Convert object model to dict. Args: obj: Object model. Returns: dict: Converted model. ### Response: def _model_to_dict(obj): """ Convert object model to dict. Args: obj: Object model. Returns: dict: Converted model. """ result = _properties_model_to_dict(obj.properties) for attribute in ('metadata', 'snapshot'): try: value = getattr(obj, attribute) except AttributeError: continue if value: result[attribute] = value return result
def calc_humidity(temp, dewpoint):
    '''Return the relative humidity computed from temperature and dewpoint
    (both converted from Fahrenheit) using the weatherwise.org formula.
    '''
    celsius_temp = fahrenheit_to_celsius(temp)
    celsius_dew = fahrenheit_to_celsius(dewpoint)
    ratio = (112 - (0.1 * celsius_temp) + celsius_dew) / (112 + (0.9 * celsius_temp))
    return ratio ** 8
calculates the humidity via the formula from weatherwise.org return the relative humidity
Below is the instruction that describes the task: ### Input: calculates the humidity via the formula from weatherwise.org return the relative humidity ### Response: def calc_humidity(temp, dewpoint): ''' calculates the humidity via the formula from weatherwise.org return the relative humidity ''' t = fahrenheit_to_celsius(temp) td = fahrenheit_to_celsius(dewpoint) num = 112 - (0.1 * t) + td denom = 112 + (0.9 * t) rh = math.pow((num / denom), 8) return rh
def voigt_symmetrized(self):
    """
    Return the "voigt"-symmetrized form of this tensor: the Voigt-notation
    array averaged over all permutations of its axes, so the result is
    invariant with respect to permutation of indices.
    """
    if self.rank % 2 != 0 or self.rank < 2:
        raise ValueError("V-symmetrization requires rank even and >= 2")
    voigt_form = self.voigt
    axis_orders = list(itertools.permutations(range(len(voigt_form.shape))))
    averaged = sum(np.transpose(voigt_form, order) for order in axis_orders) / len(axis_orders)
    return self.__class__.from_voigt(averaged)
Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation tensor such that it is invariant wrt permutation of indices
Below is the the instruction that describes the task: ### Input: Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation tensor such that it is invariant wrt permutation of indices ### Response: def voigt_symmetrized(self): """ Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation tensor such that it is invariant wrt permutation of indices """ if not (self.rank % 2 == 0 and self.rank >= 2): raise ValueError("V-symmetrization requires rank even and >= 2") v = self.voigt perms = list(itertools.permutations(range(len(v.shape)))) new_v = sum([np.transpose(v, ind) for ind in perms]) / len(perms) return self.__class__.from_voigt(new_v)
def umask(self, new_mask):
    """Change the current umask.

    Args:
        new_mask: (int) The new umask value.

    Returns:
        The old umask.

    Raises:
        TypeError: if new_mask is of an invalid type.
    """
    if not is_int_type(new_mask):
        raise TypeError('an integer is required')
    previous_mask = self.filesystem.umask
    self.filesystem.umask = new_mask
    return previous_mask
Change the current umask. Args: new_mask: (int) The new umask value. Returns: The old umask. Raises: TypeError: if new_mask is of an invalid type.
Below is the the instruction that describes the task: ### Input: Change the current umask. Args: new_mask: (int) The new umask value. Returns: The old umask. Raises: TypeError: if new_mask is of an invalid type. ### Response: def umask(self, new_mask): """Change the current umask. Args: new_mask: (int) The new umask value. Returns: The old umask. Raises: TypeError: if new_mask is of an invalid type. """ if not is_int_type(new_mask): raise TypeError('an integer is required') old_umask = self.filesystem.umask self.filesystem.umask = new_mask return old_umask
# NOTE(review): the dataset export flattened this handler onto a few physical lines,
# so the original indentation is unrecoverable here and the code is kept byte-identical.
# From the visible calls (course_factory/user_manager/template_helper/web.input) this is
# presumably an INGInious web GET handler — TODO confirm against the upstream project.
# It lets the session user join ("register_group") or leave ("unregister_group") a
# classroom group or team stored in the MongoDB `aggregations` collection, collects the
# user's five most recent submissions for the course, and renders either the classroom
# or the team page. Staff get a 404; users without course access get a
# course_unavailable page.
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ """ GET request """ course = self.course_factory.get_course(courseid) username = self.user_manager.session_username() error = False change = False msg = "" data = web.input() if self.user_manager.has_staff_rights_on_course(course): raise web.notfound() elif not self.user_manager.course_is_open_to_user(course, lti=False): return self.template_helper.get_renderer().course_unavailable() elif "register_group" in data: change = True if course.can_students_choose_group() and course.use_classrooms(): aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username}) if int(data["register_group"]) >= 0 and (len(aggregation["groups"]) > int(data["register_group"])): group = aggregation["groups"][int(data["register_group"])] if group["size"] > len(group["students"]): for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) aggregation["groups"][int(data["register_group"])]["students"].append(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) self._logger.info("User %s registered to group %s/%s/%s", username, courseid, aggregation["description"], data["register_group"]) else: error = True msg = _("Couldn't register to the specified group.") elif course.can_students_choose_group(): aggregation = self.database.aggregations.find_one( {"courseid": course.get_id(), "students": username}) if aggregation is not None: aggregation["students"].remove(username) for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) # Add student in the classroom and unique group self.database.aggregations.find_one_and_update({"_id": 
ObjectId(data["register_group"])}, {"$push": {"students": username}}) new_aggregation = self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])}, {"$push": {"groups.0.students": username}}) if new_aggregation is None: error = True msg = _("Couldn't register to the specified group.") else: self._logger.info("User %s registered to team %s/%s", username, courseid, aggregation["description"]) else: error = True msg = _("You are not allowed to change group.") elif "unregister_group" in data: change = True if course.can_students_choose_group(): aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username, "groups.students": username}) if aggregation is not None: for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) self._logger.info("User %s unregistered from group/team %s/%s", username, courseid, aggregation["description"]) else: error = True msg = _("You're not registered in a group.") else: error = True msg = _("You are not allowed to change group.") tasks = course.get_tasks() last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": courseid, "taskid": {"$in": list(tasks.keys())}}) for submission in last_submissions: submission["taskname"] = tasks[submission['taskid']].get_name(self.user_manager.session_language()) aggregation = self.user_manager.get_course_user_aggregation(course) aggregations = self.user_manager.get_course_aggregations(course) users = self.user_manager.get_users_info(self.user_manager.get_course_registered_users(course)) if course.use_classrooms(): mygroup = None for index, group in enumerate(aggregation["groups"]): if self.user_manager.session_username() in group["students"]: mygroup = group mygroup["index"] = index + 1 return 
self.template_helper.get_renderer().classroom(course, last_submissions, aggregation, users, mygroup, msg, error, change) else: return self.template_helper.get_renderer().team(course, last_submissions, aggregations, users, aggregation, msg, error)
GET request
Below is the the instruction that describes the task: ### Input: GET request ### Response: def GET_AUTH(self, courseid): # pylint: disable=arguments-differ """ GET request """ course = self.course_factory.get_course(courseid) username = self.user_manager.session_username() error = False change = False msg = "" data = web.input() if self.user_manager.has_staff_rights_on_course(course): raise web.notfound() elif not self.user_manager.course_is_open_to_user(course, lti=False): return self.template_helper.get_renderer().course_unavailable() elif "register_group" in data: change = True if course.can_students_choose_group() and course.use_classrooms(): aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username}) if int(data["register_group"]) >= 0 and (len(aggregation["groups"]) > int(data["register_group"])): group = aggregation["groups"][int(data["register_group"])] if group["size"] > len(group["students"]): for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) aggregation["groups"][int(data["register_group"])]["students"].append(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) self._logger.info("User %s registered to group %s/%s/%s", username, courseid, aggregation["description"], data["register_group"]) else: error = True msg = _("Couldn't register to the specified group.") elif course.can_students_choose_group(): aggregation = self.database.aggregations.find_one( {"courseid": course.get_id(), "students": username}) if aggregation is not None: aggregation["students"].remove(username) for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) # Add student in the classroom and unique 
group self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])}, {"$push": {"students": username}}) new_aggregation = self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])}, {"$push": {"groups.0.students": username}}) if new_aggregation is None: error = True msg = _("Couldn't register to the specified group.") else: self._logger.info("User %s registered to team %s/%s", username, courseid, aggregation["description"]) else: error = True msg = _("You are not allowed to change group.") elif "unregister_group" in data: change = True if course.can_students_choose_group(): aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username, "groups.students": username}) if aggregation is not None: for index, group in enumerate(aggregation["groups"]): if username in group["students"]: aggregation["groups"][index]["students"].remove(username) self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation) self._logger.info("User %s unregistered from group/team %s/%s", username, courseid, aggregation["description"]) else: error = True msg = _("You're not registered in a group.") else: error = True msg = _("You are not allowed to change group.") tasks = course.get_tasks() last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": courseid, "taskid": {"$in": list(tasks.keys())}}) for submission in last_submissions: submission["taskname"] = tasks[submission['taskid']].get_name(self.user_manager.session_language()) aggregation = self.user_manager.get_course_user_aggregation(course) aggregations = self.user_manager.get_course_aggregations(course) users = self.user_manager.get_users_info(self.user_manager.get_course_registered_users(course)) if course.use_classrooms(): mygroup = None for index, group in enumerate(aggregation["groups"]): if self.user_manager.session_username() in group["students"]: mygroup = group 
mygroup["index"] = index + 1 return self.template_helper.get_renderer().classroom(course, last_submissions, aggregation, users, mygroup, msg, error, change) else: return self.template_helper.get_renderer().team(course, last_submissions, aggregations, users, aggregation, msg, error)
def _learnOnNewSegments(connections, rng, newSegmentCells, growthCandidates, initialPermanence, sampleSize, maxSynapsesPerSegment): """ Create new segments, and grow synapses on them. @param connections (SparseMatrixConnections) @param rng (Random) @param newSegmentCells (numpy array) @param growthCandidates (numpy array) """ numNewSynapses = len(growthCandidates) if sampleSize != -1: numNewSynapses = min(numNewSynapses, sampleSize) if maxSynapsesPerSegment != -1: numNewSynapses = min(numNewSynapses, maxSynapsesPerSegment) newSegments = connections.createSegments(newSegmentCells) connections.growSynapsesToSample(newSegments, growthCandidates, numNewSynapses, initialPermanence, rng)
Create new segments, and grow synapses on them. @param connections (SparseMatrixConnections) @param rng (Random) @param newSegmentCells (numpy array) @param growthCandidates (numpy array)
Below is the the instruction that describes the task: ### Input: Create new segments, and grow synapses on them. @param connections (SparseMatrixConnections) @param rng (Random) @param newSegmentCells (numpy array) @param growthCandidates (numpy array) ### Response: def _learnOnNewSegments(connections, rng, newSegmentCells, growthCandidates, initialPermanence, sampleSize, maxSynapsesPerSegment): """ Create new segments, and grow synapses on them. @param connections (SparseMatrixConnections) @param rng (Random) @param newSegmentCells (numpy array) @param growthCandidates (numpy array) """ numNewSynapses = len(growthCandidates) if sampleSize != -1: numNewSynapses = min(numNewSynapses, sampleSize) if maxSynapsesPerSegment != -1: numNewSynapses = min(numNewSynapses, maxSynapsesPerSegment) newSegments = connections.createSegments(newSegmentCells) connections.growSynapsesToSample(newSegments, growthCandidates, numNewSynapses, initialPermanence, rng)
def plot_color_map_bars(values, vmin=None, vmax=None, color_map=None, axis=None, **kwargs):
    '''
    Plot bar for each value in `values`, colored based on values mapped onto the
    specified color map.

    Args
    ----
    values (pandas.Series) : Numeric values to plot one bar per value.
    axis : A matplotlib axis. If `None`, an axis is created.
    vmin : Minimum value to clip values at.
    vmax : Maximum value to clip values at.
    color_map : A matplotlib color map (see `matplotlib.cm`), or a colormap
        name. If `None`, the rcParams default (`image.cmap`) is used.
    **kwargs : Extra keyword arguments to pass to `values.plot`.

    Returns
    -------
    (axis) : Bar plot axis.
    '''
    if axis is None:
        fig, axis = plt.subplots()
    # `vmin or min(values)` would wrongly discard an explicit 0;
    # compare against None instead so 0 is a valid bound.
    norm = mpl.colors.Normalize(vmin=min(values) if vmin is None else vmin,
                                vmax=max(values) if vmax is None else vmax,
                                clip=True)
    if color_map is None:
        # rcParams stores the colormap *name* (a str); resolve it to a
        # callable colormap object before use.
        color_map = plt.get_cmap(mpl.rcParams['image.cmap'])
    elif isinstance(color_map, str):
        color_map = plt.get_cmap(color_map)
    # Normalize returns a masked array; fill masked entries before mapping.
    colors = color_map(norm(values.values).filled())
    values.plot(kind='bar', ax=axis, color=colors, **kwargs)
    return axis
Plot bar for each value in `values`, colored based on values mapped onto the specified color map. Args ---- values (pandas.Series) : Numeric values to plot one bar per value. axis : A matplotlib axis. If `None`, an axis is created. vmin : Minimum value to clip values at. vmax : Maximum value to clip values at. color_map : A matplotlib color map (see `matplotlib.cm`). **kwargs : Extra keyword arguments to pass to `values.plot`. Returns ------- (axis) : Bar plot axis.
Below is the the instruction that describes the task: ### Input: Plot bar for each value in `values`, colored based on values mapped onto the specified color map. Args ---- values (pandas.Series) : Numeric values to plot one bar per value. axis : A matplotlib axis. If `None`, an axis is created. vmin : Minimum value to clip values at. vmax : Maximum value to clip values at. color_map : A matplotlib color map (see `matplotlib.cm`). **kwargs : Extra keyword arguments to pass to `values.plot`. Returns ------- (axis) : Bar plot axis. ### Response: def plot_color_map_bars(values, vmin=None, vmax=None, color_map=None, axis=None, **kwargs): ''' Plot bar for each value in `values`, colored based on values mapped onto the specified color map. Args ---- values (pandas.Series) : Numeric values to plot one bar per value. axis : A matplotlib axis. If `None`, an axis is created. vmin : Minimum value to clip values at. vmax : Maximum value to clip values at. color_map : A matplotlib color map (see `matplotlib.cm`). **kwargs : Extra keyword arguments to pass to `values.plot`. Returns ------- (axis) : Bar plot axis. ''' if axis is None: fig, axis = plt.subplots() norm = mpl.colors.Normalize(vmin=vmin or min(values), vmax=vmax or max(values), clip=True) if color_map is None: color_map = mpl.rcParams['image.cmap'] colors = color_map(norm(values.values).filled()) values.plot(kind='bar', ax=axis, color=colors, **kwargs) return axis
# NOTE(review): the dataset export flattened this function onto a few physical lines,
# so the original indentation is unrecoverable here and the code is kept byte-identical.
# Visible behaviour: disaggregates daily temperature (columns tmin/tmax/temp of
# data_daily) to an hourly pandas Series, either by fitting a cosine through the daily
# minimum/maximum ('sine', 'sine_min_max', 'sine_mean') with the min/max hours placed
# per `min_max_time` ('fix' = 7h/14h, 'sun_loc', 'sun_loc_shift'), or by scaling a
# monthly mean diurnal course ('mean_course_min_max', 'mean_course_mean'). Days with
# daylength below 3 h are treated as polar nights with no diurnal cycle and linear
# transitions in/out. Uses the melodist package's hourly_index helper — behaviour of
# the sun_times/max_delta inputs is presumed from get_sun_times()/get_shift_by_data();
# TODO confirm against the melodist documentation.
def disaggregate_temperature(data_daily, method='sine_min_max', min_max_time='fix', mod_nighttime=False, max_delta=None, mean_course=None, sun_times=None): """The disaggregation function for temperature Parameters ---- data_daily : daily data method : method to disaggregate min_max_time: "fix" - min/max temperature at fixed times 7h/14h, "sun_loc" - min/max calculated by sunrise/sunnoon + 2h, "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift, max_delta: maximum monthly temperature shift as returned by get_shift_by_data() sun_times: times of sunrise/noon as returned by get_sun_times() """ if method not in ( 'sine_min_max', 'sine_mean', 'sine', 'mean_course_min_max', 'mean_course_mean', ): raise ValueError('Invalid option') temp_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index)) if method in ('sine_min_max', 'sine_mean', 'sine'): # for this option assume time of minimum and maximum and fit cosine function through minimum and maximum temperatures hours_per_day = 24 default_shift_hours = 2 daylength_thres = 3 # min / max hour during polar night assumption min_loc_polar = 6 max_loc_polar = 18 locdf = pd.DataFrame( index=data_daily.index, columns=[ 'min_loc', 'max_loc', 'min_val_before', 'min_val_cur', 'min_val_next', 'max_val_before', 'max_val_cur', 'max_val_next', 'mean_val_cur', ] ) if min_max_time == 'fix': # take fixed location for minimum and maximum locdf.min_loc = 7 locdf.max_loc = 14 elif min_max_time == 'sun_loc': # take location for minimum and maximum by sunrise / sunnoon + 2h locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour locdf.max_loc = sun_times.sunnoon.round() + default_shift_hours # sun noon round to full hour + fix 2h elif min_max_time == 'sun_loc_shift': # take location for minimum and maximum by sunrise / sunnoon + monthly delta locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour locdf.max_loc = (sun_times.sunnoon + max_delta[locdf.index.month].values).round() 
# sun noon + shift derived from observed hourly data, round to full hour pos = locdf.min_loc > locdf.max_loc locdf.loc[pos, 'max_loc'] = sun_times.sunnoon[pos].round() + default_shift_hours # standard shift in this case locdf.min_loc = locdf.min_loc.astype(int) locdf.max_loc = locdf.max_loc.astype(int) locdf.min_val_cur = data_daily.tmin locdf.max_val_cur = data_daily.tmax locdf.mean_val_cur = data_daily.temp locdf.min_val_next = data_daily.tmin.shift(-1, 'D') locdf.max_val_next = data_daily.tmax.shift(-1, 'D') locdf.loc[locdf.index[-1], 'min_val_next'] = locdf.min_val_cur.iloc[-1] locdf.loc[locdf.index[-1], 'max_val_next'] = locdf.max_val_cur.iloc[-1] locdf.min_val_before = data_daily.tmin.shift(1, 'D') locdf.max_val_before = data_daily.tmax.shift(1, 'D') locdf.loc[locdf.index[0], 'min_val_before'] = locdf.min_val_cur.iloc[0] locdf.loc[locdf.index[0], 'max_val_before'] = locdf.max_val_cur.iloc[0] locdf_day = locdf locdf = locdf.reindex(temp_disagg.index, method='ffill') # whenever we are before the maximum for the current day, use minimum value of current day for cosine function fitting # once we have passed the maximum value use the minimum for next day to ensure smooth transitions min_val = locdf.min_val_next.copy() min_val[min_val.index.hour < locdf.max_loc] = locdf.min_val_cur # whenever we are before the minimum for the current day, use maximum value of day before for cosine function fitting # once we have passed the minimum value use the maximum for the current day to ensure smooth transitions max_val = locdf.max_val_cur.copy() max_val[max_val.index.hour < locdf.min_loc] = locdf.max_val_before temp_disagg = pd.Series(index=min_val.index) if method in ('sine_min_max', 'sine'): delta_val = max_val - min_val v_trans = min_val + delta_val / 2. 
if mod_nighttime: before_min = locdf.index.hour <= locdf.min_loc between_min_max = (locdf.index.hour > locdf.min_loc) & (locdf.index.hour < locdf.max_loc) after_max = locdf.index.hour >= locdf.max_loc temp_disagg[before_min] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (hours_per_day - locdf.max_loc + locdf.index.hour)) temp_disagg[between_min_max] = v_trans + delta_val / 2. * np.cos(1.25 * np.pi + 0.75 * np.pi / (locdf.max_loc - locdf.min_loc) * (locdf.index.hour - locdf.min_loc)) temp_disagg[after_max] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (locdf.index.hour - locdf.max_loc)) else: temp_disagg[:] = v_trans + (delta_val / 2.) * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc)) elif method == 'sine_mean': dtr = locdf.max_val_cur - locdf.min_val_cur temp_disagg[:] = locdf.mean_val_cur + dtr / 2. * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc)) polars = sun_times.daylength < daylength_thres if polars.sum() > 0: # during polar night, no diurnal variation of temperature is applied # instead the daily average calculated using tmin and tmax is applied polars_index_hourly = melodist.util.hourly_index(polars[polars].index) temp_disagg.loc[polars_index_hourly] = np.nan avg_before = (locdf_day.min_val_before + locdf_day.max_val_before) / 2. avg_cur = (locdf_day.min_val_cur + locdf_day.max_val_cur) / 2. 
getting_warmers = polars & (avg_before <= avg_cur) getting_colders = polars & ~(avg_before <= avg_cur) getting_warmers_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_warmers[getting_warmers].index]) getting_warmers_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_warmers[getting_warmers].index]) temp_disagg[getting_warmers_min_loc] = locdf_day.min_val_cur[getting_warmers].values temp_disagg[getting_warmers_max_loc] = locdf_day.max_val_cur[getting_warmers].values getting_colders_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_colders[getting_colders].index]) getting_colders_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_colders[getting_colders].index]) temp_disagg[getting_colders_min_loc] = locdf_day.max_val_cur[getting_colders].values temp_disagg[getting_colders_max_loc] = locdf_day.min_val_cur[getting_colders].values temp_polars = temp_disagg.loc[polars_index_hourly].copy() transition_days = polars[polars.diff() == True].astype(int) # 0 where transition from polar to "normal" mode, 1 where transition from normal to polar if len(transition_days) > 0: polar_to_normal_days = transition_days.index[transition_days == 0] normal_to_polar_days = transition_days.index[transition_days == 1] - pd.Timedelta(days=1) add_days = polar_to_normal_days.union(normal_to_polar_days) temp_polars = temp_polars.append(temp_disagg[melodist.util.hourly_index(add_days)]).sort_index() for day in polar_to_normal_days: min_loc = int(locdf.loc[day].min_loc) temp_polars[day.replace(hour=0):day.replace(hour=min_loc) - pd.Timedelta(hours=1)] = np.nan temp_polars[day.replace(hour=min_loc)] = locdf.min_val_cur[day] for day in normal_to_polar_days: max_loc = int(locdf.loc[day].max_loc) temp_polars[day.replace(hour=max_loc) + pd.Timedelta(hours=1):day.replace(hour=23)] = np.nan temp_interp = temp_polars.interpolate(method='linear', limit=23) temp_disagg[temp_interp.index] = temp_interp 
elif method == 'mean_course_min_max': data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23) df = pd.DataFrame(index=temp_disagg.index) df['normval'] = mean_course.unstack().loc[list(zip(df.index.month, df.index.hour))].values df['tmin'] = data_daily_as_hourly.tmin df['tmax'] = data_daily_as_hourly.tmax temp_disagg[:] = df.normval * (df.tmax - df.tmin) + df.tmin elif method == 'mean_course_mean': data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23) dtr = data_daily_as_hourly.tmax - data_daily_as_hourly.tmin mc = pd.Series(index=temp_disagg.index) mean_course_zeromean = mean_course - mean_course.mean() # shift mean course so that the daily mean is 0 mc[:] = mean_course_zeromean.unstack().loc[list(zip(temp_disagg.index.month, temp_disagg.index.hour))].values temp_disagg[:] = data_daily_as_hourly.temp + dtr * mc return temp_disagg
The disaggregation function for temperature Parameters ---- data_daily : daily data method : method to disaggregate min_max_time: "fix" - min/max temperature at fixed times 7h/14h, "sun_loc" - min/max calculated by sunrise/sunnoon + 2h, "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift, max_delta: maximum monthly temperature shift as returned by get_shift_by_data() sun_times: times of sunrise/noon as returned by get_sun_times()
Below is the the instruction that describes the task: ### Input: The disaggregation function for temperature Parameters ---- data_daily : daily data method : method to disaggregate min_max_time: "fix" - min/max temperature at fixed times 7h/14h, "sun_loc" - min/max calculated by sunrise/sunnoon + 2h, "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift, max_delta: maximum monthly temperature shift as returned by get_shift_by_data() sun_times: times of sunrise/noon as returned by get_sun_times() ### Response: def disaggregate_temperature(data_daily, method='sine_min_max', min_max_time='fix', mod_nighttime=False, max_delta=None, mean_course=None, sun_times=None): """The disaggregation function for temperature Parameters ---- data_daily : daily data method : method to disaggregate min_max_time: "fix" - min/max temperature at fixed times 7h/14h, "sun_loc" - min/max calculated by sunrise/sunnoon + 2h, "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift, max_delta: maximum monthly temperature shift as returned by get_shift_by_data() sun_times: times of sunrise/noon as returned by get_sun_times() """ if method not in ( 'sine_min_max', 'sine_mean', 'sine', 'mean_course_min_max', 'mean_course_mean', ): raise ValueError('Invalid option') temp_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index)) if method in ('sine_min_max', 'sine_mean', 'sine'): # for this option assume time of minimum and maximum and fit cosine function through minimum and maximum temperatures hours_per_day = 24 default_shift_hours = 2 daylength_thres = 3 # min / max hour during polar night assumption min_loc_polar = 6 max_loc_polar = 18 locdf = pd.DataFrame( index=data_daily.index, columns=[ 'min_loc', 'max_loc', 'min_val_before', 'min_val_cur', 'min_val_next', 'max_val_before', 'max_val_cur', 'max_val_next', 'mean_val_cur', ] ) if min_max_time == 'fix': # take fixed location for minimum and maximum locdf.min_loc = 7 locdf.max_loc = 14 elif 
min_max_time == 'sun_loc': # take location for minimum and maximum by sunrise / sunnoon + 2h locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour locdf.max_loc = sun_times.sunnoon.round() + default_shift_hours # sun noon round to full hour + fix 2h elif min_max_time == 'sun_loc_shift': # take location for minimum and maximum by sunrise / sunnoon + monthly delta locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour locdf.max_loc = (sun_times.sunnoon + max_delta[locdf.index.month].values).round() # sun noon + shift derived from observed hourly data, round to full hour pos = locdf.min_loc > locdf.max_loc locdf.loc[pos, 'max_loc'] = sun_times.sunnoon[pos].round() + default_shift_hours # standard shift in this case locdf.min_loc = locdf.min_loc.astype(int) locdf.max_loc = locdf.max_loc.astype(int) locdf.min_val_cur = data_daily.tmin locdf.max_val_cur = data_daily.tmax locdf.mean_val_cur = data_daily.temp locdf.min_val_next = data_daily.tmin.shift(-1, 'D') locdf.max_val_next = data_daily.tmax.shift(-1, 'D') locdf.loc[locdf.index[-1], 'min_val_next'] = locdf.min_val_cur.iloc[-1] locdf.loc[locdf.index[-1], 'max_val_next'] = locdf.max_val_cur.iloc[-1] locdf.min_val_before = data_daily.tmin.shift(1, 'D') locdf.max_val_before = data_daily.tmax.shift(1, 'D') locdf.loc[locdf.index[0], 'min_val_before'] = locdf.min_val_cur.iloc[0] locdf.loc[locdf.index[0], 'max_val_before'] = locdf.max_val_cur.iloc[0] locdf_day = locdf locdf = locdf.reindex(temp_disagg.index, method='ffill') # whenever we are before the maximum for the current day, use minimum value of current day for cosine function fitting # once we have passed the maximum value use the minimum for next day to ensure smooth transitions min_val = locdf.min_val_next.copy() min_val[min_val.index.hour < locdf.max_loc] = locdf.min_val_cur # whenever we are before the minimum for the current day, use maximum value of day before for cosine function fitting # once we have passed the minimum value 
use the maximum for the current day to ensure smooth transitions max_val = locdf.max_val_cur.copy() max_val[max_val.index.hour < locdf.min_loc] = locdf.max_val_before temp_disagg = pd.Series(index=min_val.index) if method in ('sine_min_max', 'sine'): delta_val = max_val - min_val v_trans = min_val + delta_val / 2. if mod_nighttime: before_min = locdf.index.hour <= locdf.min_loc between_min_max = (locdf.index.hour > locdf.min_loc) & (locdf.index.hour < locdf.max_loc) after_max = locdf.index.hour >= locdf.max_loc temp_disagg[before_min] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (hours_per_day - locdf.max_loc + locdf.index.hour)) temp_disagg[between_min_max] = v_trans + delta_val / 2. * np.cos(1.25 * np.pi + 0.75 * np.pi / (locdf.max_loc - locdf.min_loc) * (locdf.index.hour - locdf.min_loc)) temp_disagg[after_max] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (locdf.index.hour - locdf.max_loc)) else: temp_disagg[:] = v_trans + (delta_val / 2.) * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc)) elif method == 'sine_mean': dtr = locdf.max_val_cur - locdf.min_val_cur temp_disagg[:] = locdf.mean_val_cur + dtr / 2. * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc)) polars = sun_times.daylength < daylength_thres if polars.sum() > 0: # during polar night, no diurnal variation of temperature is applied # instead the daily average calculated using tmin and tmax is applied polars_index_hourly = melodist.util.hourly_index(polars[polars].index) temp_disagg.loc[polars_index_hourly] = np.nan avg_before = (locdf_day.min_val_before + locdf_day.max_val_before) / 2. avg_cur = (locdf_day.min_val_cur + locdf_day.max_val_cur) / 2. 
getting_warmers = polars & (avg_before <= avg_cur) getting_colders = polars & ~(avg_before <= avg_cur) getting_warmers_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_warmers[getting_warmers].index]) getting_warmers_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_warmers[getting_warmers].index]) temp_disagg[getting_warmers_min_loc] = locdf_day.min_val_cur[getting_warmers].values temp_disagg[getting_warmers_max_loc] = locdf_day.max_val_cur[getting_warmers].values getting_colders_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_colders[getting_colders].index]) getting_colders_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_colders[getting_colders].index]) temp_disagg[getting_colders_min_loc] = locdf_day.max_val_cur[getting_colders].values temp_disagg[getting_colders_max_loc] = locdf_day.min_val_cur[getting_colders].values temp_polars = temp_disagg.loc[polars_index_hourly].copy() transition_days = polars[polars.diff() == True].astype(int) # 0 where transition from polar to "normal" mode, 1 where transition from normal to polar if len(transition_days) > 0: polar_to_normal_days = transition_days.index[transition_days == 0] normal_to_polar_days = transition_days.index[transition_days == 1] - pd.Timedelta(days=1) add_days = polar_to_normal_days.union(normal_to_polar_days) temp_polars = temp_polars.append(temp_disagg[melodist.util.hourly_index(add_days)]).sort_index() for day in polar_to_normal_days: min_loc = int(locdf.loc[day].min_loc) temp_polars[day.replace(hour=0):day.replace(hour=min_loc) - pd.Timedelta(hours=1)] = np.nan temp_polars[day.replace(hour=min_loc)] = locdf.min_val_cur[day] for day in normal_to_polar_days: max_loc = int(locdf.loc[day].max_loc) temp_polars[day.replace(hour=max_loc) + pd.Timedelta(hours=1):day.replace(hour=23)] = np.nan temp_interp = temp_polars.interpolate(method='linear', limit=23) temp_disagg[temp_interp.index] = temp_interp 
elif method == 'mean_course_min_max': data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23) df = pd.DataFrame(index=temp_disagg.index) df['normval'] = mean_course.unstack().loc[list(zip(df.index.month, df.index.hour))].values df['tmin'] = data_daily_as_hourly.tmin df['tmax'] = data_daily_as_hourly.tmax temp_disagg[:] = df.normval * (df.tmax - df.tmin) + df.tmin elif method == 'mean_course_mean': data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23) dtr = data_daily_as_hourly.tmax - data_daily_as_hourly.tmin mc = pd.Series(index=temp_disagg.index) mean_course_zeromean = mean_course - mean_course.mean() # shift mean course so that the daily mean is 0 mc[:] = mean_course_zeromean.unstack().loc[list(zip(temp_disagg.index.month, temp_disagg.index.hour))].values temp_disagg[:] = data_daily_as_hourly.temp + dtr * mc return temp_disagg
def purview(repertoire): """The purview of the repertoire. Args: repertoire (np.ndarray): A repertoire Returns: tuple[int]: The purview that the repertoire was computed over. """ if repertoire is None: return None return tuple(i for i, dim in enumerate(repertoire.shape) if dim == 2)
The purview of the repertoire. Args: repertoire (np.ndarray): A repertoire Returns: tuple[int]: The purview that the repertoire was computed over.
Below is the the instruction that describes the task: ### Input: The purview of the repertoire. Args: repertoire (np.ndarray): A repertoire Returns: tuple[int]: The purview that the repertoire was computed over. ### Response: def purview(repertoire): """The purview of the repertoire. Args: repertoire (np.ndarray): A repertoire Returns: tuple[int]: The purview that the repertoire was computed over. """ if repertoire is None: return None return tuple(i for i, dim in enumerate(repertoire.shape) if dim == 2)
def get_preferred(self, addr_1, addr_2): '''Return the preferred address.''' if addr_1 > addr_2: addr_1, addr_2 = addr_2, addr_1 return self._cache.get((addr_1, addr_2))
Return the preferred address.
Below is the the instruction that describes the task: ### Input: Return the preferred address. ### Response: def get_preferred(self, addr_1, addr_2): '''Return the preferred address.''' if addr_1 > addr_2: addr_1, addr_2 = addr_2, addr_1 return self._cache.get((addr_1, addr_2))
def _colorize(self, msg, color=None, encode=False): """ Colorize a string. """ # Valid colors colors = { 'red': '31', 'green': '32', 'yellow': '33' } # No color specified or unsupported color if not color or not color in colors: return msg # The colorized string if encode: return u'\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg) return '\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg)
Colorize a string.
Below is the the instruction that describes the task: ### Input: Colorize a string. ### Response: def _colorize(self, msg, color=None, encode=False): """ Colorize a string. """ # Valid colors colors = { 'red': '31', 'green': '32', 'yellow': '33' } # No color specified or unsupported color if not color or not color in colors: return msg # The colorized string if encode: return u'\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg) return '\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg)
def add(data_dic): ''' Insert new record. ''' uid = data_dic['uid'] TabLog.create( uid=uid, current_url=data_dic['url'], refer_url=data_dic['refer'], user_id=data_dic['user_id'], time_create=data_dic['timein'], time_out=data_dic['timeOut'], time=data_dic['timeon'] ) return uid
Insert new record.
Below is the the instruction that describes the task: ### Input: Insert new record. ### Response: def add(data_dic): ''' Insert new record. ''' uid = data_dic['uid'] TabLog.create( uid=uid, current_url=data_dic['url'], refer_url=data_dic['refer'], user_id=data_dic['user_id'], time_create=data_dic['timein'], time_out=data_dic['timeOut'], time=data_dic['timeon'] ) return uid
def ensure_crops(self, *required_crops): """ Make sure a crop exists for each crop in required_crops. Existing crops will not be changed. If settings.ASSET_CELERY is specified then the task will be run async """ if self._can_crop(): if settings.CELERY or settings.USE_CELERY_DECORATOR: # this means that we are using celery args = [self.pk]+list(required_crops) tasks.ensure_crops.apply_async(args=args, countdown=5) else: tasks.ensure_crops(None, *required_crops, asset=self)
Make sure a crop exists for each crop in required_crops. Existing crops will not be changed. If settings.ASSET_CELERY is specified then the task will be run async
Below is the the instruction that describes the task: ### Input: Make sure a crop exists for each crop in required_crops. Existing crops will not be changed. If settings.ASSET_CELERY is specified then the task will be run async ### Response: def ensure_crops(self, *required_crops): """ Make sure a crop exists for each crop in required_crops. Existing crops will not be changed. If settings.ASSET_CELERY is specified then the task will be run async """ if self._can_crop(): if settings.CELERY or settings.USE_CELERY_DECORATOR: # this means that we are using celery args = [self.pk]+list(required_crops) tasks.ensure_crops.apply_async(args=args, countdown=5) else: tasks.ensure_crops(None, *required_crops, asset=self)
def get_probability_grammar(self): """ A method that returns probability grammar """ # Creating valid word expression for probability, it is of the format # wor1 | var2 , var3 or var1 var2 var3 or simply var word_expr = Word(alphanums + '-' + '_') + Suppress(Optional("|")) + Suppress(Optional(",")) word_expr2 = Word(initChars=printables, excludeChars=[',', ')', ' ', '(']) + Suppress(Optional(",")) # creating an expression for valid numbers, of the format # 1.00 or 1 or 1.00. 0.00 or 9.8e-5 etc num_expr = Word(nums + '-' + '+' + 'e' + 'E' + '.') + Suppress(Optional(",")) probability_expr = Suppress('probability') + Suppress('(') + OneOrMore(word_expr) + Suppress(')') optional_expr = Suppress('(') + OneOrMore(word_expr2) + Suppress(')') probab_attributes = optional_expr | Suppress('table') cpd_expr = probab_attributes + OneOrMore(num_expr) return probability_expr, cpd_expr
A method that returns probability grammar
Below is the the instruction that describes the task: ### Input: A method that returns probability grammar ### Response: def get_probability_grammar(self): """ A method that returns probability grammar """ # Creating valid word expression for probability, it is of the format # wor1 | var2 , var3 or var1 var2 var3 or simply var word_expr = Word(alphanums + '-' + '_') + Suppress(Optional("|")) + Suppress(Optional(",")) word_expr2 = Word(initChars=printables, excludeChars=[',', ')', ' ', '(']) + Suppress(Optional(",")) # creating an expression for valid numbers, of the format # 1.00 or 1 or 1.00. 0.00 or 9.8e-5 etc num_expr = Word(nums + '-' + '+' + 'e' + 'E' + '.') + Suppress(Optional(",")) probability_expr = Suppress('probability') + Suppress('(') + OneOrMore(word_expr) + Suppress(')') optional_expr = Suppress('(') + OneOrMore(word_expr2) + Suppress(')') probab_attributes = optional_expr | Suppress('table') cpd_expr = probab_attributes + OneOrMore(num_expr) return probability_expr, cpd_expr
def _compute_magnitude_term(self, C, dc1, mag): """ Computes the magnitude scaling term given by equation (2) """ base = C['theta1'] + (self.CONSTS['theta4'] * dc1) dmag = self.CONSTS["C1"] + dc1 if mag > dmag: f_mag = (self.CONSTS['theta5'] * (mag - dmag)) +\ C['theta13'] * ((10. - mag) ** 2.) else: f_mag = (self.CONSTS['theta4'] * (mag - dmag)) +\ C['theta13'] * ((10. - mag) ** 2.) return base + f_mag
Computes the magnitude scaling term given by equation (2)
Below is the the instruction that describes the task: ### Input: Computes the magnitude scaling term given by equation (2) ### Response: def _compute_magnitude_term(self, C, dc1, mag): """ Computes the magnitude scaling term given by equation (2) """ base = C['theta1'] + (self.CONSTS['theta4'] * dc1) dmag = self.CONSTS["C1"] + dc1 if mag > dmag: f_mag = (self.CONSTS['theta5'] * (mag - dmag)) +\ C['theta13'] * ((10. - mag) ** 2.) else: f_mag = (self.CONSTS['theta4'] * (mag - dmag)) +\ C['theta13'] * ((10. - mag) ** 2.) return base + f_mag
def compare_hives(fs0, fs1): """Compares all the windows registry hive files returning those which differ. """ registries = [] for path in chain(registries_path(fs0.fsroot), user_registries(fs0, fs1)): if fs0.checksum(path) != fs1.checksum(path): registries.append(path) return registries
Compares all the windows registry hive files returning those which differ.
Below is the the instruction that describes the task: ### Input: Compares all the windows registry hive files returning those which differ. ### Response: def compare_hives(fs0, fs1): """Compares all the windows registry hive files returning those which differ. """ registries = [] for path in chain(registries_path(fs0.fsroot), user_registries(fs0, fs1)): if fs0.checksum(path) != fs1.checksum(path): registries.append(path) return registries