code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def __init__(self, builtins, interpreter_builtins, interpreter): <NEW_LINE> <INDENT> super().__init__(__BUILTIN_POS__) <NEW_LINE> for id, f in builtins.items(): <NEW_LINE> <INDENT> self.create_local(Syn('ID', id, __BUILTIN_POS__), f) <NEW_LINE> <DEDENT> for id, make_func in interpreter_builtins.items(): <NEW_LINE> <INDENT> bulitin_func = make_func(interpreter) <NEW_LINE> self.create_local(Syn('ID', id, __BUILTIN_POS__), bulitin_func)
:param builtins: :type builtins: dict[str,function] :return: :rtype:
625941c26fb2d068a760f03c
def add_repository(self, repo_source, repo_type, repo_alias, repo_prio): <NEW_LINE> <INDENT> self.xml_data.add_repository( xml_parse.repository( type_=repo_type, alias=repo_alias, priority=repo_prio, source=xml_parse.source(path=repo_source) ) )
Add a new repository section at the end of the list :param string repo_source: repository URI :param string repo_type: type name defined by schema :param string repo_alias: alias name :param string repo_prio: priority number, package manager specific
625941c2c432627299f04be5
def _validate_node_links_json(node_type, node_links, errors): <NEW_LINE> <INDENT> assert isinstance(errors, dict), "errors must be a dict." <NEW_LINE> if not isinstance(node_links, list): <NEW_LINE> <INDENT> errors['links'] = _("links must be a list") <NEW_LINE> return False <NEW_LINE> <DEDENT> if node_type == Start.node_type: <NEW_LINE> <INDENT> if len(node_links) != 2: <NEW_LINE> <INDENT> errors['links'] = _("Start should have two children: 'related' to end, 'to' to any node but an end.") <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> elif node_type == End.node_type: <NEW_LINE> <INDENT> if len(node_links) != 0: <NEW_LINE> <INDENT> errors['links'] = _("End should have no children.") <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> elif node_type == Kill.node_type: <NEW_LINE> <INDENT> if len(node_links) != 0: <NEW_LINE> <INDENT> errors['links'] = _("Kill should have no children.") <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> elif node_type in (Join.node_type, DecisionEnd.node_type): <NEW_LINE> <INDENT> if len(node_links) != 1: <NEW_LINE> <INDENT> errors['links'] = _("Join and Decision End should have one child: 'to' to any node.") <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> elif node_type in (Fork.node_type, Decision.node_type): <NEW_LINE> <INDENT> if len(node_links) < 2: <NEW_LINE> <INDENT> errors['links'] = _("Fork and Decision should have at least two children: 'related' to their respective ends, 'start' to any node.") <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if len(node_links) != 2: <NEW_LINE> <INDENT> errors['links'] = _("Actions should have two children: 'error' to kill, 'ok' to any node.") <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> link_names_by_node_type = { 'start': {'related': 1, 'to': 1}, 'end': {}, 'kill': {}, 'fork': {'related': 1, 'start': 2}, 'join': {'to': 1}, 'decision': {'related': 1, 'start': 2}, 'decisionend': {'to': 1}, None: {'ok': 1, 'error': 1}, } <NEW_LINE> 
link_types = link_names_by_node_type.get(node_type, link_names_by_node_type[None]) <NEW_LINE> for link in node_links: <NEW_LINE> <INDENT> link_name = link.get('name', None) <NEW_LINE> if link_name in link_types: <NEW_LINE> <INDENT> link_types[link_name] -= 1 <NEW_LINE> <DEDENT> <DEDENT> for link_type in link_types: <NEW_LINE> <INDENT> if link_types[link_type] > 0: <NEW_LINE> <INDENT> errors['links'] = _('%(node_type)s should have %(count)d more %(link_type)s link' % { 'node_type': node_type, 'count': link_types[link_type], 'link_type': link_type }) <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> return True
Validate a single node's links. node_type is the node type of the action information passed. node_links is list of dictionaries describing the links. errors is a dictionary that will be populated with any found errors.
625941c24c3428357757c2ca
def build(platform="mac", release=False): <NEW_LINE> <INDENT> specs = load_spec() <NEW_LINE> ensure_build() <NEW_LINE> TARGETS = specs['targets'] <NEW_LINE> LIBS = specs["libraries"] <NEW_LINE> MAIN = specs["main"] <NEW_LINE> INCLUDES = [BINARIES_DIR, BUILD_DIR] <NEW_LINE> EXTRA_FLAGS = [] <NEW_LINE> if platform == "mac": <NEW_LINE> <INDENT> EXTRA_FLAGS.append("-d:TARGET_MAC") <NEW_LINE> <DEDENT> if release: <NEW_LINE> <INDENT> EXTRA_FLAGS.append("-O") <NEW_LINE> <DEDENT> PREFIX = BUILD_DIR <NEW_LINE> graph = {} <NEW_LINE> for library in LIBS: <NEW_LINE> <INDENT> graph[library] = (False, []) <NEW_LINE> <DEDENT> for target, deps in TARGETS.items(): <NEW_LINE> <INDENT> suffix = ".exe" if target == MAIN else ".dll" <NEW_LINE> src = target + ".fs" <NEW_LINE> dst = os.path.join(BUILD_DIR, target + suffix) <NEW_LINE> updated = newer(src, dst) <NEW_LINE> graph[target] = (updated, deps) <NEW_LINE> <DEDENT> order = resolve_build_order(graph, MAIN) <NEW_LINE> needs_rebuild = False <NEW_LINE> for t, n in order: <NEW_LINE> <INDENT> if n: <NEW_LINE> <INDENT> needs_rebuild = True <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> if not needs_rebuild: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> for target, needs_build in order: <NEW_LINE> <INDENT> if needs_build: <NEW_LINE> <INDENT> extra = EXTRA_FLAGS <NEW_LINE> if target == MAIN: <NEW_LINE> <INDENT> lib = False <NEW_LINE> if release: <NEW_LINE> <INDENT> extra.append("--standalone") <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> lib = True <NEW_LINE> <DEDENT> print("> Building '{}'...".format(target)) <NEW_LINE> cmd = get_build_command(graph, target, prefix=PREFIX, includes=INCLUDES, library=lib, extra_flags=extra) <NEW_LINE> print("$ {}".format(" ".join(cmd))) <NEW_LINE> exit_status = subprocess.call(cmd) <NEW_LINE> if exit_status: <NEW_LINE> <INDENT> print("> Failed to build '{}'".format(target)) <NEW_LINE> return exit_status <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print("> '{}' was built, 
skipping...".format(target)) <NEW_LINE> <DEDENT> <DEDENT> if platform == "mac": <NEW_LINE> <INDENT> bundled = bundle() <NEW_LINE> return bundled <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return built
Builds the F# program from the given specification dictionary
625941c2507cdc57c6306c77
def scrape_cv() -> None: <NEW_LINE> <INDENT> files_dict = get_arguments() <NEW_LINE> for ind in range(len(files_dict['input'])): <NEW_LINE> <INDENT> cv_dict = docx_to_dict(files_dict['input'][ind], files_dict['images'][ind]) <NEW_LINE> with open(files_dict['output'][ind], 'w') as f: <NEW_LINE> <INDENT> json.dump(cv_dict, f, indent=4)
Convert ordina cv to json
625941c2de87d2750b85fd31
def __init__(self, exe_return_filename, weights_method, prev_node, class_name): <NEW_LINE> <INDENT> super().__init__(prev_node) <NEW_LINE> CFooterNode.__instance = self <NEW_LINE> dim = CHeaderNode.instance().in_dim <NEW_LINE> id = CHeaderNode.instance().id <NEW_LINE> self.in_dim = prev_node.out_dim <NEW_LINE> self.in_var = prev_node.out_var <NEW_LINE> self.x_dim = dim[0] <NEW_LINE> self.y_dim = dim[1] <NEW_LINE> self.class_name = class_name <NEW_LINE> if len(dim) > 2: <NEW_LINE> <INDENT> self.z_dim = dim[2] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.z_dim = 1 <NEW_LINE> <DEDENT> self.version = "5" <NEW_LINE> if id is None: <NEW_LINE> <INDENT> self.id = '' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.id = id <NEW_LINE> <DEDENT> self.exe_return_filename = exe_return_filename <NEW_LINE> if weights_method == 'stdio': <NEW_LINE> <INDENT> self.weights_init = 'init_weights();' <NEW_LINE> <DEDENT> elif weights_method == 'direct': <NEW_LINE> <INDENT> self.weights_init = '' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception('Unimplemented')
Initialize the node. :param exe_return_filename: Name of file to write test results in. :param weights_method: The method how the weights are stored and initialized. 'direct': The weights are written into the C file. 'stdio': The weights are read using ANSI C stdio. :param prev_node: The previous node.
625941c28a43f66fc4b54008
def str2floatlist(str_values, expected_values): <NEW_LINE> <INDENT> import re <NEW_LINE> if str_values is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> values = re.findall("[-+]?\d+[\.]?\d*", str_values) <NEW_LINE> if len(values) < expected_values: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> for i in range(len(values)): <NEW_LINE> <INDENT> values[i] = float(values[i]) <NEW_LINE> <DEDENT> return values
Converts a string to a list of values. It returns None if the array is smaller than the expected size.
625941c2a17c0f6771cbdff3
def SetName(self, name): <NEW_LINE> <INDENT> self.__header.SetName(name)
Set the name of the histogram :param name: Name of the histogram :type name: String
625941c21f5feb6acb0c4af4
def move(servo, angle): <NEW_LINE> <INDENT> pulse_width = (MAX_PULSE - MIN_PULSE) * angle + MIN_PULSE <NEW_LINE> dc = pulse_width / (10.0 / PWM_FREQ) <NEW_LINE> servo.ChangeDutyCycle(dc) <NEW_LINE> return
Change servo rotation. Parameters: servo: running servo PWM instance angle: servo rotation. 0 = full CW, 1 = full CCW
625941c224f1403a92600b09
def random_search_log_reg(train, train_Y, test, save_path): <NEW_LINE> <INDENT> model = LogisticRegression() <NEW_LINE> C = uniform(loc=0, scale=4) <NEW_LINE> hyperparameters = dict(C=C) <NEW_LINE> clf = RandomizedSearchCV(model, hyperparameters, random_state=1, n_iter=10, cv=3, verbose=3, n_jobs=-1) <NEW_LINE> best_model = clf.fit(train, train_Y) <NEW_LINE> print('Best C:', best_model.best_estimator_.get_params()['C']) <NEW_LINE> predictions = best_model.predict_proba(test)[:, 1] <NEW_LINE> if save_path is not None: <NEW_LINE> <INDENT> save_pickle(save_path, best_model) <NEW_LINE> print("Log reg baseline model saved to: ", save_path) <NEW_LINE> <DEDENT> return best_model, predictions
Uses random search to tune the C parameter for logistic regression and make a prediction. :param train: Training data as array :param train_Y: Training labels :param test: Test data as array :param save_path: path to save model to - If None then model will not be saved :return: best model from last fold and predictions for the test data
625941c20a50d4780f666e31
def basicsetup(testcase): <NEW_LINE> <INDENT> testcase.workbook = AL_Excel.load_workbook(str(TESTFILE), data_only = True) <NEW_LINE> for sheet in DATA['SHEETS']: <NEW_LINE> <INDENT> setattr(testcase,sheet['alias'],testcase.workbook[sheet['name']])
Does basic workbook setup for the testcase
625941c25fdd1c0f98dc01d3
def get_bulk_works_details_iter(self, putcodes): <NEW_LINE> <INDENT> if not putcodes: <NEW_LINE> <INDENT> raise ValueError('putcodes can not be an empty sequence') <NEW_LINE> <DEDENT> for putcodes_chunk in utils.chunked_sequence_iter(putcodes, MAX_PUTCODES_PER_WORKS_DETAILS_REQUEST): <NEW_LINE> <INDENT> yield self._get_bulk_works_details(putcodes_chunk)
Yield a summary of the given works for the given orcid. A number of requests: GET https://api.orcid.org/v2.0/0000-0002-0942-3697/works/46674246 Args: putcode (List[string]): putcode. Yields: GetWorksDetailsResponse: the response. Note: This call can be very expensive for an author with many works (if each work also has many contributors). Fi. for an ATLAS author with ~750 works, 8 calls would be performed with a total data transfer > 0.5 Gb. Docs: https://members.orcid.org/api/tutorial/read-orcid-records#usetoken
625941c2187af65679ca50bf
def shortest_path_matrix(self): <NEW_LINE> <INDENT> if not self.src_inds: <NEW_LINE> <INDENT> self.src_inds = [self.vertex_count, self.vertex_count] <NEW_LINE> self.dst_inds = [np.random.randint(0, self.vertex_count), np.random.randint(0, self.vertex_count)] <NEW_LINE> self.vertex_count += 1 <NEW_LINE> <DEDENT> dist = np.zeros((self.vertex_count, self.vertex_count)) <NEW_LINE> src_inds = arr(self.src_inds) <NEW_LINE> dst_inds = arr(self.dst_inds) <NEW_LINE> try: <NEW_LINE> <INDENT> dist[src_inds, dst_inds] = dist[dst_inds, src_inds] = self.spread <NEW_LINE> <DEDENT> except IndexError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> start = 0 <NEW_LINE> slices = [] <NEW_LINE> iteration = 0 <NEW_LINE> self.msg.emit("Step 1 of 2: Computing shortest-path matrix...") <NEW_LINE> while start < self.vertex_count: <NEW_LINE> <INDENT> if self._stopped: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> self.progressed.emit(iteration) <NEW_LINE> stop = min(self.vertex_count, start + math.ceil(self.vertex_count / 10)) <NEW_LINE> slice_ = dijkstra(dist, directed=False, indices=range(start, stop)) <NEW_LINE> slices.append(slice_) <NEW_LINE> start = stop <NEW_LINE> iteration += 1 <NEW_LINE> <DEDENT> matrix = np.vstack(slices) <NEW_LINE> matrix[matrix == np.inf] = self.spread * self.vertex_count ** (0.5) <NEW_LINE> matrix[matrix == 0] = self.spread * 1e-6 <NEW_LINE> return matrix
Returns the shortest-path matrix.
625941c201c39578d7e74ddc
def show_autoeval(self, args): <NEW_LINE> <INDENT> self.msg("autoeval is %s." % self.get_autoeval()) <NEW_LINE> return False
Show if unrecognized command are evaluated
625941c2a219f33f3462890d
def clean_concepts(linkbases): <NEW_LINE> <INDENT> concepts_removed = [] <NEW_LINE> xlink = "{http://www.w3.org/1999/xlink}" <NEW_LINE> schema = linkbases["xsd"]["filename"].split("/")[-1] <NEW_LINE> href_xpath = ".//*[@{0}href='{1}#%s']".format(xlink, schema) <NEW_LINE> concepts_xpath = ".//{http://www.w3.org/2001/XMLSchema}element" <NEW_LINE> for concept in linkbases["xsd"]["root"].iterfind(concepts_xpath): <NEW_LINE> <INDENT> identifier = concept.get("id") <NEW_LINE> used = False <NEW_LINE> for key, val in linkbases.items(): <NEW_LINE> <INDENT> exists = val["root"].find(href_xpath % identifier) <NEW_LINE> if key != "xsd" and etree.iselement(exists): <NEW_LINE> <INDENT> used = True <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> if not used: <NEW_LINE> <INDENT> linkbases["xsd"]["root"].remove(concept) <NEW_LINE> concepts_removed.append(identifier) <NEW_LINE> <DEDENT> <DEDENT> return concepts_removed
Searches through the provided dictionary of linkbases using the xsd to build a list of extension concepts. Then finds any that aren't referenced by the presentation, definition, calculation, or label linkbases and removes them.
625941c2be7bc26dc91cd5a4
def __init__(self): <NEW_LINE> <INDENT> self.interchangeable_qubit_indices = []
Initialize a basic gate. Note: Set interchangeable qubit indices! (gate.interchangeable_qubit_indices) As an example, consider .. code-block:: python ExampleGate | (a,b,c,d,e) where a and b are interchangeable. Then, call this function as follows: .. code-block:: python self.set_interchangeable_qubit_indices([[0,1]]) As another example, consider .. code-block:: python ExampleGate2 | (a,b,c,d,e) where a and b are interchangeable and, in addition, c, d, and e are interchangeable among themselves. Then, call this function as .. code-block:: python self.set_interchangeable_qubit_indices([[0,1],[2,3,4]])
625941c24d74a7450ccd4164
def write(self, data, waitForResponse=True, timeout=5, parseError=True, writeTerm='\r', expectedResponseTermSeq=None): <NEW_LINE> <INDENT> self.log.debug('write: %s', data) <NEW_LINE> responseLines = SerialComms.write(self, data + writeTerm, waitForResponse=waitForResponse, timeout=timeout, expectedResponseTermSeq=expectedResponseTermSeq) <NEW_LINE> if self._writeWait > 0: <NEW_LINE> <INDENT> time.sleep(self._writeWait) <NEW_LINE> <DEDENT> if waitForResponse: <NEW_LINE> <INDENT> cmdStatusLine = responseLines[-1] <NEW_LINE> if parseError: <NEW_LINE> <INDENT> if 'ERROR' in cmdStatusLine: <NEW_LINE> <INDENT> cmErrorMatch = self.CM_ERROR_REGEX.match(cmdStatusLine) <NEW_LINE> if cmErrorMatch: <NEW_LINE> <INDENT> errorType = cmErrorMatch.group(1) <NEW_LINE> errorCode = int(cmErrorMatch.group(2)) <NEW_LINE> if errorCode == 515 or errorCode == 14: <NEW_LINE> <INDENT> self._writeWait += 0.2 <NEW_LINE> self.log.debug('Device/SIM busy error detected; self._writeWait adjusted to %fs', self._writeWait) <NEW_LINE> time.sleep(self._writeWait) <NEW_LINE> result = self.write(data, waitForResponse, timeout, parseError, writeTerm, expectedResponseTermSeq) <NEW_LINE> self.log.debug('self_writeWait set to 0.1 because of recovering from device busy (515) error') <NEW_LINE> if errorCode == 515: <NEW_LINE> <INDENT> self._writeWait = 0.1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._writeWait = 0 <NEW_LINE> <DEDENT> return result <NEW_LINE> <DEDENT> if errorType == 'CME': <NEW_LINE> <INDENT> raise CmeError(data, int(errorCode)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise CmsError(data, int(errorCode)) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise CommandError(data) <NEW_LINE> <DEDENT> <DEDENT> elif cmdStatusLine == 'COMMAND NOT SUPPORT': <NEW_LINE> <INDENT> raise CommandError(data + '({0})'.format(cmdStatusLine)) <NEW_LINE> <DEDENT> <DEDENT> return responseLines
Write data to the modem This method adds the ' ' end-of-line sequence to the data parameter, and writes it to the modem @param data: Command/data to be written to the modem @type data: str @param waitForResponse: Whether this method should block and return the response from the modem or not @type waitForResponse: bool @param timeout: Maximum amount of time in seconds to wait for a response from the modem @type timeout: int @param parseError: If True, a CommandError is raised if the modem responds with an error (otherwise the response is returned as-is) @type parseError: bool @param writeTerm: The terminating sequence to append to the written data @type writeTerm: str @param expectedResponseTermSeq: The expected terminating sequence that marks the end of the modem's response (defaults to ' ') @type expectedResponseTermSeq: str @raise CommandError: if the command returns an error (only if parseError parameter is True) @raise TimeoutException: if no response to the command was received from the modem @return: A list containing the response lines from the modem, or None if waitForResponse is False @rtype: list
625941c2ad47b63b2c509f20
def setCenter(self, *args, **kwargs): <NEW_LINE> <INDENT> cen = galsim.utilities.parse_pos_args(args, kwargs, 'xcen', 'ycen', integer=True) <NEW_LINE> self._shift(cen - self.image.bounds.center())
Set the center of the image to the given (integral) (xcen, ycen) The arguments here may be either (xcen, ycen) or a PositionI instance. Or you can provide xcen, ycen as named kwargs.
625941c230bbd722463cbd65
def ht_radio_checked(option,select): <NEW_LINE> <INDENT> if option==select: return 'checked="checked"' <NEW_LINE> return ''
The description of ht_radio_checked comes here. @param option @param select @return
625941c2f9cc0f698b14059e
def calculate_mean_reversion_level(days_to_maturity, mean_reversion_rate, sigma, reference_curve): <NEW_LINE> <INDENT> year_fraction = days_to_maturity / 365 <NEW_LINE> forward_rate = reference_curve.instantaneous_forward_rate(days_to_maturity) <NEW_LINE> forward_rate_plus = reference_curve.instantaneous_forward_rate(days_to_maturity + 1.0) <NEW_LINE> forward_rate_minus = reference_curve.instantaneous_forward_rate(days_to_maturity - 1.0) <NEW_LINE> one_day = 1.0 / 365 <NEW_LINE> forward_rate_derivative = float(forward_rate_plus - forward_rate_minus) / (2.0 * one_day) <NEW_LINE> theta = 0 <NEW_LINE> theta += forward_rate_derivative <NEW_LINE> theta += mean_reversion_rate * forward_rate <NEW_LINE> theta += sigma * sigma * 1.0 / (2.0 * mean_reversion_rate) * (1.0 - exp(-mean_reversion_rate * year_fraction)) <NEW_LINE> return theta
:param days_to_maturity: :param mean_reversion_rate: :param sigma: :param reference_curve: :return:
625941c282261d6c526ab43d
def next_batch(num, data): <NEW_LINE> <INDENT> idx = np.arange(0, len(data)) <NEW_LINE> np.random.shuffle(idx) <NEW_LINE> idx = idx[:num] <NEW_LINE> data_shuffle = [data[i] for i in idx] <NEW_LINE> return np.asarray(data_shuffle)
Return a total of `num` random samples and labels.
625941c24c3428357757c2cb
def _group_by_type(documents, models=None): <NEW_LINE> <INDENT> doc_classes = {} <NEW_LINE> if models is not None: <NEW_LINE> <INDENT> doc_classes.update({model.__name__: model for model in models}) <NEW_LINE> <DEDENT> grouped = defaultdict(list) <NEW_LINE> for doc in documents: <NEW_LINE> <INDENT> if doc._type not in doc_classes: <NEW_LINE> <INDENT> doc_classes[doc._type] = engine.get_document_cls(doc._type) <NEW_LINE> <DEDENT> doc_cls = doc_classes[doc._type] <NEW_LINE> grouped[doc_cls].append(doc) <NEW_LINE> <DEDENT> return grouped
Group documents by document class. :param documents: List of documents to group. :param models: List models classes of documents. :returns: Dict of grouped documents of format {Model: [doc1, doc2, ...]}.
625941c2d58c6744b4257c01
def _get_last_checkpoint_no(self, root_path): <NEW_LINE> <INDENT> a = self.get_checkpoint_no(root_path) <NEW_LINE> if len(a) > 0: <NEW_LINE> <INDENT> return a[-1] <NEW_LINE> <DEDENT> return -1
only get the first depth
625941c2b545ff76a8913db7
def _feature_correct(self, features): <NEW_LINE> <INDENT> if not 'urn:xmpp:message-correct:0' in features: <NEW_LINE> <INDENT> if 'correct' in self.commands: <NEW_LINE> <INDENT> del self.commands['correct'] <NEW_LINE> <DEDENT> <DEDENT> elif not 'correct' in self.commands: <NEW_LINE> <INDENT> self.register_command('correct', self.command_correct, desc=_('Fix the last message with whatever you want.'), shortdesc=_('Correct the last message.'), completion=self.completion_correct) <NEW_LINE> <DEDENT> return 'correct' in self.commands
Check for the 'correction' feature
625941c299cbb53fe6792b88
def plotblossom(self, blossom_i): <NEW_LINE> <INDENT> i_hot = self.findhot(self.u_grid[blossom_i]) <NEW_LINE> u = linspace(self.u_grid[i_hot]*0.95, self.u_grid[i_hot+1]*1.05, 100) <NEW_LINE> d1 = zeros((100, 2)) <NEW_LINE> d2 = zeros((100, 2)) <NEW_LINE> d3 = zeros((100, 2)) <NEW_LINE> for j in range(0, 100): <NEW_LINE> <INDENT> [d_hotx, d_hoty] = self.blossom(self.d, u[j], i_hot) <NEW_LINE> d1x = d_hotx[1, 0] <NEW_LINE> d1y = d_hoty[1, 0] <NEW_LINE> d1[j, :] = (d1x, d1y) <NEW_LINE> d2x = d_hotx[2, 0] <NEW_LINE> d2y = d_hoty[2, 0] <NEW_LINE> d2[j, :] = (d2x, d2y) <NEW_LINE> d3x = d_hotx[3, 0] <NEW_LINE> d3y = d_hoty[3, 0] <NEW_LINE> d3[j, :] = (d3x, d3y) <NEW_LINE> <DEDENT> plt.plot(d1[:,0], d1[:,1], label='Linear blossom') <NEW_LINE> plt.plot(d2[:,0], d2[:,1], label='Quadratic blossom') <NEW_LINE> plt.plot(d3[:,0], d3[:,1], label='Final blossom') <NEW_LINE> plt.legend(loc = 'upper right')
Plots the blossom curves
625941c24e4d5625662d437b
def sort(n, integer=False): <NEW_LINE> <INDENT> x = ''.join(sorted(str(n))) <NEW_LINE> if integer: <NEW_LINE> <INDENT> return(int(x)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return(x)
Takes a string/integer as input and lexicographically sorts it. If integer is True, it returns it as integer type.
625941c22ae34c7f2600d0d3
def get_device_name(self, device): <NEW_LINE> <INDENT> parts = device.split("_") <NEW_LINE> mac = parts[0] <NEW_LINE> ap_mac = None <NEW_LINE> if len(parts) > 1: <NEW_LINE> <INDENT> ap_mac = parts[1] <NEW_LINE> <DEDENT> name = None <NEW_LINE> for dev in self.last_results: <NEW_LINE> <INDENT> if dev.mac == mac: <NEW_LINE> <INDENT> name = dev.name <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> if not name or name == "--": <NEW_LINE> <INDENT> name = mac <NEW_LINE> <DEDENT> if ap_mac: <NEW_LINE> <INDENT> ap_name = "Router" <NEW_LINE> for dev in self.last_results: <NEW_LINE> <INDENT> if dev.mac == ap_mac: <NEW_LINE> <INDENT> ap_name = dev.name <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> return f"{name} on {ap_name}" <NEW_LINE> <DEDENT> return name
Return the name of the given device or the MAC if we don't know.
625941c21f037a2d8b94619f
def _add_bond( self, atom1, atom2, bond_order, is_aromatic, stereochemistry=None, fractional_bond_order=None, ): <NEW_LINE> <INDENT> if isinstance(atom1, int) and isinstance(atom2, int): <NEW_LINE> <INDENT> atom1_atom = self.atoms[atom1] <NEW_LINE> atom2_atom = self.atoms[atom2] <NEW_LINE> <DEDENT> elif isinstance(atom1, Atom) and isinstance(atom2, Atom): <NEW_LINE> <INDENT> atom1_atom = atom1 <NEW_LINE> atom2_atom = atom2 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception( "Invalid inputs to molecule._add_bond. Expected ints or Atoms. " "Received {} (type {}) and {} (type {}) ".format( atom1, type(atom1), atom2, type(atom2) ) ) <NEW_LINE> <DEDENT> if atom1_atom.is_bonded_to(atom2_atom): <NEW_LINE> <INDENT> raise Exception( "Bond already exists between {} and {}".format(atom1_atom, atom2_atom) ) <NEW_LINE> <DEDENT> bond = Bond( atom1_atom, atom2_atom, bond_order, is_aromatic, stereochemistry=stereochemistry, fractional_bond_order=fractional_bond_order, ) <NEW_LINE> self._bonds.append(bond) <NEW_LINE> self._invalidate_cached_properties() <NEW_LINE> return self._bonds.index(bond)
Add a bond between two specified atom indices Parameters ---------- atom1 : int or openff.toolkit.topology.molecule.Atom Index of first atom or first atom atom2_index : int or openff.toolkit.topology.molecule.Atom Index of second atom or second atom bond_order : int Integral bond order of Kekulized form is_aromatic : bool True if this bond is aromatic, False otherwise stereochemistry : str, optional, default=None Either 'E' or 'Z' for specified stereochemistry, or None if stereochemistry is irrelevant fractional_bond_order : float, optional, default=None The fractional (eg. Wiberg) bond order Returns ------- index : int The index of the bond in the molecule
625941c23eb6a72ae02ec479
def test_type(self): <NEW_LINE> <INDENT> assert is_consistent_type(X509Req, 'X509Req')
`X509Req` can be used to create instances of that type.
625941c2287bf620b61d3a06
def _cleanup_namespaces(self, router_namespaces, router_ids): <NEW_LINE> <INDENT> ns_to_ignore = set(NS_PREFIX + id for id in router_ids) <NEW_LINE> ns_to_ignore.update(SNAT_NS_PREFIX + id for id in router_ids) <NEW_LINE> ns_to_destroy = router_namespaces - ns_to_ignore <NEW_LINE> self._destroy_stale_router_namespaces(ns_to_destroy)
Destroy stale router namespaces on host when L3 agent restarts This routine is called when self._clean_stale_namespaces is True. The argument router_namespaces is the list of all routers namespaces The argument router_ids is the list of ids for known routers.
625941c266656f66f7cbc14b
def __hash__(self): <NEW_LINE> <INDENT> raise TypeError('%s is unhashable' % self.__class__.__name__)
Disallow hashing StateStats since they are mutable by design.
625941c2d486a94d0b98e0e6
def deserialize(self, str): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> end = 0 <NEW_LINE> _x = self <NEW_LINE> start = end <NEW_LINE> end += 5 <NEW_LINE> (_x.pin, _x.state,) = _struct_Bf.unpack(str[start:end]) <NEW_LINE> return self <NEW_LINE> <DEDENT> except struct.error as e: <NEW_LINE> <INDENT> raise genpy.DeserializationError(e)
unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str``
625941c263b5f9789fde7086
def test_add_word(self): <NEW_LINE> <INDENT> email = "cordelia@zulip.com" <NEW_LINE> user = get_user_profile_by_email(email) <NEW_LINE> add_user_alert_words(user, self.interesting_alert_word_list) <NEW_LINE> words = user_alert_words(user) <NEW_LINE> self.assertEqual(words, self.interesting_alert_word_list)
add_user_alert_words can add multiple alert words at once.
625941c2bf627c535bc13170
def warning(x, *args, **kargs): <NEW_LINE> <INDENT> if kargs.pop("onlyOnce", False): <NEW_LINE> <INDENT> from scapy.config import conf <NEW_LINE> conf.warning_next_only_once = True <NEW_LINE> <DEDENT> log_runtime.warning(x, *args, **kargs)
Prints a warning during runtime. onlyOnce - if True, the warning will never be printed again.
625941c2bd1bec0571d905d0
def Fabber(*search_dirs, **kwargs): <NEW_LINE> <INDENT> corelib, coreexe, libs, exes = find_fabber(*search_dirs, **kwargs) <NEW_LINE> if corelib and kwargs.get("shlib"): <NEW_LINE> <INDENT> return FabberShlib(core_lib=corelib, model_libs=libs, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return FabberCl(core_exe=coreexe, model_exes=exes, **kwargs)
Get an API object for Fabber. Uses the shared lib API if available and requested using `shlib=True`, otherwise use command line wrappers :param extra_search_dirs: Extra search directories to use to look for Fabber libraries and executables
625941c257b8e32f5248343b
def hard_plastic_net(X, y, xi, lambda_total=1.0, alpha=0.75, tol=1e-8, max_iter=1000): <NEW_LINE> <INDENT> N, D = X.shape <NEW_LINE> beta = np.zeros(D, dtype=np.float64) <NEW_LINE> r = y - np.dot(X, beta) <NEW_LINE> hard_plastic_net_( beta, r, X, xi, lambda_total=lambda_total, alpha=alpha, tol=tol, max_iter=max_iter, ) <NEW_LINE> return beta
Hard plastic net regression. This function finds the :math:`\vec{\beta}` that minimizes .. math:: \tfrac{1}{2N} ||\vec{y}-X\vec{\beta}||_2^2 + \lambda \bigl( \alpha||\vec{\beta}-\vec{\xi}||_1 + (1-\alpha) \tfrac{1}{2} ||\vec{\beta}||_2^2 \bigr) Args: X (numpy.ndarray): shape (N,P) data matrix. y (numpy.ndarray): shape (N,) target vector. xi (numpy.ndarray): shape (P,) target for L1 penalty. lambda_total (float): must be non-negative. total regularization penalty strength. alpha (float): mixing parameter between L1 and L1 penalties. must be between zero and one. :math:`\alpha=0` is pure L2 penalty, :math:`\alpha=1` is pure L1 penalty. tol (float): convergence criterion for coordinate descent. coordinate descent runs until the maximum element-wise change in **beta** is less than **tol**. max_iter (int): maximum number of update passes through all P elements of **beta**, in case **tol** is never met. Returns: (numpy.ndarray): shape (D,) coefficient vector.
625941c2498bea3a759b9a51
def generate_record(): <NEW_LINE> <INDENT> json_data = json.loads(pkg_resources.resource_string( __name__, os.path.join( '../fixtures', 'oai_arxiv_core_record.json' ) )) <NEW_LINE> if 'preprint_date' in json_data: <NEW_LINE> <INDENT> json_data['preprint_date'] = datetime.date.today().isoformat() <NEW_LINE> <DEDENT> return json_data
Provide record fixtures.
625941c26aa9bd52df036d44
def set_formula(self, formula): <NEW_LINE> <INDENT> self.formula = formula.replace(' ', '').replace('\n', '').replace('\r', '')
Store the input formula as the one to evaluate on.
625941c2cb5e8a47e48b7a4e
def get_values(self): <NEW_LINE> <INDENT> return self.as_matrix()
same as values (but handles sparseness conversions)
625941c210dbd63aa1bd2b45
def cancel_train_network(self,event): <NEW_LINE> <INDENT> self.config = [] <NEW_LINE> self.sel_config.SetPath("") <NEW_LINE> self.pose_cfg_text.Hide() <NEW_LINE> self.update_params_text.Hide() <NEW_LINE> self.pose_cfg_choice.SetSelection(1) <NEW_LINE> self.display_iters.SetValue(1000) <NEW_LINE> self.save_iters.SetValue(50000) <NEW_LINE> self.max_iters.SetValue(103000) <NEW_LINE> self.snapshots.SetValue(5) <NEW_LINE> self.SetSizer(self.sizer) <NEW_LINE> self.sizer.Fit(self)
Reset to default
625941c22c8b7c6e89b35763
def HasMultilineSelection(self): <NEW_LINE> <INDENT> bMulti = False <NEW_LINE> sel = super().GetSelection() <NEW_LINE> if sel[0] != sel[1]: <NEW_LINE> <INDENT> sline = self.LineFromPosition(sel[0]) <NEW_LINE> eline = self.LineFromPosition(sel[1]) <NEW_LINE> bMulti = sline != eline <NEW_LINE> <DEDENT> return bMulti
Is the selection over multiple lines? @return: bool
625941c263f4b57ef00010c0
def date_time_input_format(value, tz_str): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> tz = pytz.timezone(tz_str) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> tz = pytz.utc <NEW_LINE> <DEDENT> value = value.replace(tzinfo=pytz.utc).astimezone(tz) <NEW_LINE> return value.strftime("%m-%d-%Y %I:%M %p")
Take a date time object and convert it into a string that uses the required input box format. Note, this format MUST match the format used in the get_utc_datetime_from_user_input function.
625941c2ac7a0e7691ed4072
def try_remove(self, key: KeyType, timeout: float = 0) -> Future[bool]: <NEW_LINE> <INDENT> check_not_none(key, "key can't be None") <NEW_LINE> key_data = self._to_data(key) <NEW_LINE> return self._try_remove_internal(key_data, timeout)
Tries to remove the given key from this map and returns immediately if timeout is not provided. If timeout is provided, operation waits until it is completed or timeout is reached. Args: key: Key of the entry to be deleted. timeout (float): Maximum time in seconds to wait. Returns: Future[bool]: ``True`` if the remove is successful, ``False`` otherwise.
625941c2d164cc6175782cef
def get_dimensions(is_key=True, track_menu=False): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> dimensions = additional_json['ItemVariations']['itmVarModel']['menuModels'] <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> dimensions = [] <NEW_LINE> <DEDENT> dimensions_dict = {} <NEW_LINE> dimensions_list = [] <NEW_LINE> for menu in dimensions: <NEW_LINE> <INDENT> dimension = menu.get('displayName', '') <NEW_LINE> dimension = dimension.replace(' ', '_').lower() if is_key else dimension <NEW_LINE> if track_menu == True: <NEW_LINE> <INDENT> dimensions_dict[dimension] = menu.get('menuItemValueIds') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dimensions_list.append(dimension) <NEW_LINE> <DEDENT> <DEDENT> if track_menu: <NEW_LINE> <INDENT> return dimensions_dict <NEW_LINE> <DEDENT> return dimensions_list
is_key=False ['Size', 'Color', 'Macam macam'] is_key=True ['size', 'color', 'macam_macam'] track_menu=True {'size': [1, 2, 3, 4], 'color': [5, 6, 7]}
625941c2293b9510aa2c3239
def test_feedbooks(self): <NEW_LINE> <INDENT> self._login() <NEW_LINE> response = self.client.get('/library/') <NEW_LINE> assert 'feedbooks.com/book' in response.content
Test that we get a list of feedbooks books from the main page
625941c2a8370b7717052842
def create_shell_stream(self, kernel_id): <NEW_LINE> <INDENT> shell_stream = self._create_connected_stream(kernel_id, zmq.DEALER, 'shell') <NEW_LINE> return shell_stream
Return a ZMQStream object connected to the shell channel. Parameters ========== kernel_id : uuid The id of the kernel. Returns ======= stream : ZMQStream
625941c299fddb7c1c9de333
def rbridge_id_interface_ve_ip_igmp_last_member_query_interval(**kwargs): <NEW_LINE> <INDENT> config = ET.Element("config") <NEW_LINE> rbridge_id = ET.SubElement(config, "rbridge-id", xmlns="urn:brocade.com:mgmt:brocade-rbridge") <NEW_LINE> if kwargs.pop('delete_rbridge_id', False) is True: <NEW_LINE> <INDENT> delete_rbridge_id = config.find('.//*rbridge-id') <NEW_LINE> delete_rbridge_id.set('operation', 'delete') <NEW_LINE> <DEDENT> rbridge_id_key = ET.SubElement(rbridge_id, "rbridge-id") <NEW_LINE> rbridge_id_key.text = kwargs.pop('rbridge_id') <NEW_LINE> if kwargs.pop('delete_rbridge_id', False) is True: <NEW_LINE> <INDENT> delete_rbridge_id = config.find('.//*rbridge-id') <NEW_LINE> delete_rbridge_id.set('operation', 'delete') <NEW_LINE> <DEDENT> interface = ET.SubElement(rbridge_id, "interface", xmlns="urn:brocade.com:mgmt:brocade-interface") <NEW_LINE> if kwargs.pop('delete_interface', False) is True: <NEW_LINE> <INDENT> delete_interface = config.find('.//*interface') <NEW_LINE> delete_interface.set('operation', 'delete') <NEW_LINE> <DEDENT> ve = ET.SubElement(interface, "ve") <NEW_LINE> if kwargs.pop('delete_ve', False) is True: <NEW_LINE> <INDENT> delete_ve = config.find('.//*ve') <NEW_LINE> delete_ve.set('operation', 'delete') <NEW_LINE> <DEDENT> name_key = ET.SubElement(ve, "name") <NEW_LINE> name_key.text = kwargs.pop('name') <NEW_LINE> if kwargs.pop('delete_name', False) is True: <NEW_LINE> <INDENT> delete_name = config.find('.//*name') <NEW_LINE> delete_name.set('operation', 'delete') <NEW_LINE> <DEDENT> ip = ET.SubElement(ve, "ip", xmlns="urn:brocade.com:mgmt:brocade-ip-config") <NEW_LINE> if kwargs.pop('delete_ip', False) is True: <NEW_LINE> <INDENT> delete_ip = config.find('.//*ip') <NEW_LINE> delete_ip.set('operation', 'delete') <NEW_LINE> <DEDENT> igmp = ET.SubElement(ip, "igmp", xmlns="urn:brocade.com:mgmt:brocade-igmp") <NEW_LINE> if kwargs.pop('delete_igmp', False) is True: <NEW_LINE> <INDENT> delete_igmp = config.find('.//*igmp') <NEW_LINE> 
delete_igmp.set('operation', 'delete') <NEW_LINE> <DEDENT> last_member_query_interval = ET.SubElement(igmp, "last-member-query-interval") <NEW_LINE> if kwargs.pop('delete_last_member_query_interval', False) is True: <NEW_LINE> <INDENT> delete_last_member_query_interval = config.find('.//*last-member-query-interval') <NEW_LINE> delete_last_member_query_interval.set('operation', 'delete') <NEW_LINE> <DEDENT> last_member_query_interval.text = kwargs.pop('last_member_query_interval') <NEW_LINE> callback = kwargs.pop('callback', _callback) <NEW_LINE> return callback(config, mgr=kwargs.pop('mgr'))
Auto Generated Code
625941c2009cb60464c63355
def test_not_current(self): <NEW_LINE> <INDENT> self.assert_selector( self.MARKUP, "p:not(:current)", ["0"], flags=util.HTML )
Test not current.
625941c23d592f4c4ed1d014
def create_batch(self, params): <NEW_LINE> <INDENT> return json.loads(self.http_post(params, self.ENDPOINT_BATCHES))
Returns: JSON with the formatted response content
625941c21d351010ab855abe
def test_webhook_timeout_exception_on_http_response(self): <NEW_LINE> <INDENT> self.conf_override(group='webhook_notifier', timeout=50) <NEW_LINE> self.notify(self._http_post_exception) <NEW_LINE> result = self.trap.get() <NEW_LINE> self.assertEqual(result, "timeout 50") <NEW_LINE> result = self.trap.get() <NEW_LINE> self.assertNotRegex(result, "alarm_id.: .test Alarm") <NEW_LINE> self.assertNotRegex(result, "content-type.: .application/json") <NEW_LINE> self.assertRegex(result, "Error trying to post on URL http://mock:3333/") <NEW_LINE> return_value = self.trap.get() <NEW_LINE> self.assertFalse(return_value)
webhook timeout exception
625941c2b5575c28eb68dfa0
def check_keyup_events(event, paddle_top, paddle_bottom, paddle_left): <NEW_LINE> <INDENT> if event.key == pygame.K_UP: <NEW_LINE> <INDENT> paddle_left.moving_up = False <NEW_LINE> <DEDENT> elif event.key == pygame.K_DOWN: <NEW_LINE> <INDENT> paddle_left.moving_down = False <NEW_LINE> <DEDENT> if event.key == pygame.K_RIGHT: <NEW_LINE> <INDENT> paddle_top.moving_right = False <NEW_LINE> paddle_bottom.moving_right = False <NEW_LINE> <DEDENT> elif event.key == pygame.K_LEFT: <NEW_LINE> <INDENT> paddle_top.moving_left = False <NEW_LINE> paddle_bottom.moving_left = False
Respond to key releases.
625941c29f2886367277a830
def _tabell(self): <NEW_LINE> <INDENT> tabell_root = tk.Toplevel(self) <NEW_LINE> tabell_vindu = Tabell(tabell_root)
Oppretter vindu for tabell.
625941c2cc40096d615958f3
def svm_loss_vectorized(W, X, y, reg): <NEW_LINE> <INDENT> loss = 0.0 <NEW_LINE> dW = np.zeros(W.shape) <NEW_LINE> delta = 1.0 <NEW_LINE> num_train = X.shape[0] <NEW_LINE> scores = X.dot(W) <NEW_LINE> correct_class_score = scores[np.arange(num_train), y] <NEW_LINE> margins = np.maximum(0, scores - correct_class_score[:, np.newaxis] + delta) <NEW_LINE> margins[np.arange(num_train), y] = 0 <NEW_LINE> loss = np.sum(margins) / num_train <NEW_LINE> loss += reg * np.sum(W * W) <NEW_LINE> X_mask = np.zeros(margins.shape) <NEW_LINE> X_mask[margins > 0] = 1 <NEW_LINE> incorrect_counts = np.sum(X_mask, axis=1) <NEW_LINE> X_mask[np.arange(num_train), y] = -incorrect_counts <NEW_LINE> dW = X.T.dot(X_mask) <NEW_LINE> dW /= num_train <NEW_LINE> dW += 2 * reg * W <NEW_LINE> return loss, dW
Structured SVM loss function, vectorized implementation. Inputs and outputs are the same as svm_loss_naive.
625941c26fb2d068a760f03d
def creation_date(path_to_file, return_datetime=True): <NEW_LINE> <INDENT> if platform.system() == 'Windows': <NEW_LINE> <INDENT> created_at = os.path.getctime(path_to_file) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> stat = os.stat(path_to_file) <NEW_LINE> try: <NEW_LINE> <INDENT> created_at = stat.st_birthtime <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> created_at = stat.st_mtime <NEW_LINE> <DEDENT> <DEDENT> if return_datetime: <NEW_LINE> <INDENT> return datetime.fromtimestamp(created_at) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return created_at
Retrieve a file's creation date. Try to get the date that a file was created, falling back to when it was last modified if that isn't possible. See http://stackoverflow.com/a/39501288/1709587 for explanation. :param path_to_file: File path :param return_datetime: Bool, returns value in Datetime format :return: Creation date
625941c28e7ae83300e4af6e
def log_sum_exp(x): <NEW_LINE> <INDENT> x_max = x.data.max() <NEW_LINE> return torch.log(torch.sum(torch.exp(x-x_max), -1, keepdim=True)) + x_max
Utility function for computing log_sum_exp while determining This will be used to determine unaveraged confidence loss across all examples in a batch. Args: x (Variable(tensor)): conf_preds from conf layers
625941c2377c676e9127214b
def upload_S3(self, bucket, obj): <NEW_LINE> <INDENT> return self._upload_S3(bucket, obj)
Upload JSON-encoded object to S3.
625941c2bde94217f3682d95
def another_shape(r, m): <NEW_LINE> <INDENT> for k in range (r): <NEW_LINE> <INDENT> for _ in range (k): <NEW_LINE> <INDENT> print(' ', end='') <NEW_LINE> <DEDENT> for _ in range (m - k): <NEW_LINE> <INDENT> print('+', end='') <NEW_LINE> <DEDENT> print('!', end='') <NEW_LINE> if k % 2 == 0: <NEW_LINE> <INDENT> for j in range (m - k): <NEW_LINE> <INDENT> print(j + 1, end='') <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for j in range (m - k, 0, -1): <NEW_LINE> <INDENT> print(j, end='') <NEW_LINE> <DEDENT> <DEDENT> print()
Prints a shape with r rows that looks like this example in which r = 5 and m = 8: ++++++++!12345678 +++++++!7654321 ++++++!123456 +++++!54321 ++++!1234 Note that the numbers in rows 1, 3, 5, ... always start at 1 and INCREASE, while the numbers in rows 2, 4, 6, ... DECREASE and always END at 1. Also, the number of + symbols in the first row always equals m. Here is another example in which r = 4 and m = 6 ++++++!123456 +++++!54321 ++++!1234 +++!321 Yet one more example, in which r=7 and m=7: +++++++!1234567 ++++++!654321 +++++!12345 ++++!4321 +++!123 ++!21 +!1 Preconditions: r and m are positive integers with r <= m. For purposes of "lining up", assume m and n are single digits.
625941c25fc7496912cc3920
def color_print(category, level, msg): <NEW_LINE> <INDENT> if level <= PRINT_LEVEL: <NEW_LINE> <INDENT> print(msg) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass
print colorized console output
625941c2097d151d1a222dfd
def no_hyphen_at_end_of_rand_name(logical_line, filename): <NEW_LINE> <INDENT> if './tempest/api/network/' in filename: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> msg = "T108: hyphen should not be specified at the end of rand_name()" <NEW_LINE> if RAND_NAME_HYPHEN_RE.match(logical_line): <NEW_LINE> <INDENT> return 0, msg
Check no hyphen at the end of rand_name() argument T108
625941c22c8b7c6e89b35764
def play_card(self): <NEW_LINE> <INDENT> print('Not implemented!') <NEW_LINE> raise NotImplementedError()
Play one card.
625941c226238365f5f0ee0e
def __init__(self, limbSequence, pose_loc, width, height): <NEW_LINE> <INDENT> self.limbSequence = limbSequence <NEW_LINE> self.nlimbSeq = len(limbSequence) <NEW_LINE> self.canvas = np.zeros((self.nlimbSeq * 2, height, width)).astype('float32') <NEW_LINE> self.ncp = np.zeros((self.nlimbSeq, height, width)) <NEW_LINE> self.pose = pose_loc <NEW_LINE> self.w_canvas = width <NEW_LINE> self.h_canvas = height
:param limbSequence: maybe multi lists, deal with it one by one :param pose_loc: sepcial for coco 3*19 :param width: :param height:
625941c263d6d428bbe44491
def __repr__(self): <NEW_LINE> <INDENT> s = 'Words:\n' + str(self.words) + '\n\n' <NEW_LINE> s += 'Word lengths:\n' + str(self.wordlengths) + '\n\n' <NEW_LINE> s += 'Stems:\n' + str(self.stems) + '\n\n' <NEW_LINE> s += 'Sentence lengths:\n' + str(self.sentencelengths) + '\n\n' <NEW_LINE> s += 'Punctuation:\n' + str(self.punct) <NEW_LINE> return s
Display the contents of a TextModel.
625941c230c21e258bdfa43d
def getInitiative(self): <NEW_LINE> <INDENT> initiative = self.agilityModifier <NEW_LINE> if "Speed of the cobra" in self.luckySign: <NEW_LINE> <INDENT> initiative += self.luckModifier <NEW_LINE> <DEDENT> return initiative
Return the dccZeroLevelChar initiative modifier.
625941c2e8904600ed9f1ecd
def test_save_model_network(self): <NEW_LINE> <INDENT> X_train, X_test, y_train, y_test = modelling.transform_data(self.data, 10) <NEW_LINE> results, model = modelling.evaluate_model(X_train, X_test, y_train, y_test, 128, 1) <NEW_LINE> modelling.save_model(model, self.redis) <NEW_LINE> result = sorted(self.redis.keys()) <NEW_LINE> self.assertEqual(result[0].decode("UTF-8"), "{}_model".format(self.date))
Test if the architecture is saved.
625941c23346ee7daa2b2d0c
def __init__(self, func, func_group, has_asm_impl=False): <NEW_LINE> <INDENT> self.yaml_declaration = func["declaration"] <NEW_LINE> self.java_documentation = func_group["java_documentation"] <NEW_LINE> self.c_documentation = func_group["c_documentation"] <NEW_LINE> self.default_impl_template = func_group["default_implementation_template"] <NEW_LINE> self.name, self.op, args_str = self.yaml_declaration.partition(" ") <NEW_LINE> name_parts = self.name.split("_") <NEW_LINE> input_types_encoded = self._separate_args_in_name(name_parts[2]) <NEW_LINE> output_type_encoded = self._separate_args_in_name(name_parts[3]) <NEW_LINE> args_arr = args_str.split(",") <NEW_LINE> args_arr = [ a.lstrip().rstrip() for a in args_arr ] <NEW_LINE> self.arguments = [] <NEW_LINE> self._parse_arg_types(args_arr, input_types_encoded, output_type_encoded) <NEW_LINE> self.asm_impl_metadata = [] <NEW_LINE> self.dispatch_table_generator = DispatchTableGenerator(self.name, self.arguments) <NEW_LINE> self.default_impl_generator = DefaultImplementationGenerator(self.name, self.arguments, self.default_impl_template) <NEW_LINE> self.unit_test_generator = UnitTestGenerator(self.name, self.arguments)
Initialize the function, setting the function name, its documentation, template for default implementation, and creating an array of Argument objects for the functions arguments :param func A dictionary from the spec file containing the declaration :param func_group The group the function is in: groups subsets of specific operations, e.g all Add functions which take a vector and a scalar and output a vector :param has_asm_impl Whether or not this function has an assembly implementation (used in generating dispatch tables)
625941c23617ad0b5ed67e9b
def destroy(self, request, pk=None): <NEW_LINE> <INDENT> instance = Block.objects.get(blocked=pk, blocker=self.request.user) <NEW_LINE> self.perform_destroy(instance) <NEW_LINE> return Response(status=status.HTTP_204_NO_CONTENT)
API endpoint that allows blocks to be deleted.
625941c266673b3332b92033
def createFullCorpus(self, conversations): <NEW_LINE> <INDENT> self.padToken = self.getWordId('<pad>') <NEW_LINE> self.goToken = self.getWordId('<go>') <NEW_LINE> self.eosToken = self.getWordId('<eos>') <NEW_LINE> self.unknownToken = self.getWordId('<unknown>') <NEW_LINE> for conversation in tqdm(conversations, desc='Extract conversations'): <NEW_LINE> <INDENT> self.extractConversation(conversation)
Extract all data from the given vocabulary. Save the data on disk. Note that the entire corpus is pre-processed without restriction on the sentence lenght or vocab size.
625941c2ec188e330fd5a745
def mk_diffByIdxtup_keyfunc_closure(sampledata): <NEW_LINE> <INDENT> def diff_keyfunc(idxtup): <NEW_LINE> <INDENT> ri, qi = idxtup <NEW_LINE> cpvalue = sampledata[ri][qi] <NEW_LINE> all_other_points = [cp_other for ri_other, repdata_other in list(sampledata.items()) for qi_other, cp_other in enumerate(repdata_other) if not (ri_other == ri and qi_other == qi) ] <NEW_LINE> mean_other = np.mean(all_other_points) <NEW_LINE> diff = np.abs(mean_other-cpvalue) <NEW_LINE> std_other = np.std(all_other_points) <NEW_LINE> return diff-std_other*stdevlimit <NEW_LINE> <DEDENT> return diff_keyfunc
Takes a repdata ordereddict: repdata[ri][qi] = ctvalue where ri = replicate index, qi = qpcr-index, ctvalue = qpcr ct measurement. Returns a function which for a particular (ri, qi) tuple calculates how far the corresponding ct value is from the remaining measurements. Question: What is the criteria for which samplepoint is worst (furthest from the others): a) A point which has a ct value that lies far away from the mean of the other points? b) A point with a ct value that lies far outside the stdev of the other points? Is there a difference? Well, the result is most likely the same in most cases. If removing one point changes the stdev a lot more than removing the other point? In that case, we would usually think the first point was also further away from the mean (ctdiff) than the other point. Consider: [20, 30, 30] However, the reason for doing the stdev-based calculation is that now that we have the "other" points, it is easier to calculate stdev and normalize now than to do it again later. One option, then is to return (diff-limit), which has unit of ct value: mean_other = np.mean(all_other_points) diff = np.abs(mean_other-cpvalue) std_other = np.std(all_other_points) # There is a chance that stdev is zero. limit = std_other*stdevlimit+cpdiffconst return diff-limit If this is larger than 0, the point can be discarted. Alternatively, return a value in relative standard deviations (beyond the absolute cpdiffconst): std_other = np.std(all_other_points) # There is a chance that stdev is zero. mean_other = np.mean(all_other_points) diff = np.abs(mean_other-cpvalue) return (diff-cpdiffconst)/std_other If the returned value is larger than stdevlimit, then the returned point can be discarted. I prefer the latter because it does not include stdevlimit in the keyfunc closure. 
(1 line less) Edit: Due to the chance that std is zero, I think I prefer this: mean_other = np.mean(all_other_points) diff = np.abs(mean_other-cpvalue) std_other = np.std(all_other_points) # There is a chance that stdev is zero. return diff-std_other*stdevlimit If the returned value is larger than cpdiffconst, then the point can be discarted.
625941c2ec188e330fd5a744
def test_simple_project(self): <NEW_LINE> <INDENT> args = ['startproject', 'testproject'] <NEW_LINE> testproject_dir = os.path.join(test_dir, 'testproject') <NEW_LINE> out, err = self.run_django_admin(args) <NEW_LINE> self.addCleanup(shutil.rmtree, testproject_dir) <NEW_LINE> self.assertNoOutput(err) <NEW_LINE> self.assertTrue(os.path.isdir(testproject_dir)) <NEW_LINE> out, err = self.run_django_admin(args) <NEW_LINE> self.assertNoOutput(out) <NEW_LINE> self.assertOutput(err, "already exists")
Make sure the startproject management command creates a project
625941c2fb3f5b602dac3633
def setStartPoint(self, startPoint): <NEW_LINE> <INDENT> assert ((self.m_endPoint is not None) and (startPoint.getNode().getOwnerDocument() == self.m_document)) <NEW_LINE> self.m_startPoint = startPoint <NEW_LINE> self.m_range = None
Sets just the start point of the range. New startPoint must reside within the same document as the current endpoint, and must occur before it. @param startPoint New start point for this range
625941c23346ee7daa2b2d0d
def search(searchTerm, artistName=None): <NEW_LINE> <INDENT> searchTerm = urllib.parse.quote(searchTerm) <NEW_LINE> searchTag = "q=track:" + searchTerm <NEW_LINE> if artistName is not None: <NEW_LINE> <INDENT> if "feat." in artistName: <NEW_LINE> <INDENT> artistName = artistName[:artistName.find('feat.')] <NEW_LINE> <DEDENT> elif "ft." in artistName: <NEW_LINE> <INDENT> artistName = artistName[:artistName.find('ft.')] <NEW_LINE> <DEDENT> searchTag = searchTag + "%20artist:" + urllib.parse.quote(artistName) <NEW_LINE> <DEDENT> searchuri = "https://api.spotify.com/v1/search?" + searchTag + "&type=track" <NEW_LINE> js = json.loads(urlopen(searchuri).read().decode('utf-8')) <NEW_LINE> songs = js['tracks']['items'] <NEW_LINE> song = songs[0] <NEW_LINE> artistNames = [] <NEW_LINE> for artist in song['artists']: <NEW_LINE> <INDENT> artistNames.append(artist['name'].lower()) <NEW_LINE> <DEDENT> if song['name'].lower() != searchTerm and artistName not in artistNames: <NEW_LINE> <INDENT> for track in songs: <NEW_LINE> <INDENT> artistNames = [] <NEW_LINE> for artist in song['artists']: <NEW_LINE> <INDENT> artistNames.append(artist['name'].lower()) <NEW_LINE> <DEDENT> if artist in artistNames: <NEW_LINE> <INDENT> song = track <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> songName = song['name'] <NEW_LINE> trackUri = song['uri'] <NEW_LINE> artistUri = [] <NEW_LINE> for artist in song['artists']: <NEW_LINE> <INDENT> artistUri.append(artist['id']) <NEW_LINE> <DEDENT> albumUri = song['album']['uri'] <NEW_LINE> postArg = { "source": "SPOTIFY", "type": "uri", "id": trackUri, "account": "theacex", "name": songName, "artistId": artistUri[0] } <NEW_LINE> return postArg
~ Searches for the given search term using the spotify API ~ :param searchTerm: the term to be searched. Ex: Beat it :param artistName: if given, the artist name to make the search more accurate. Ex: Michael Jackson :return: A dictionary containing the information and spotify link of the song (if it is found) Using the Spotify API, the song is acquired by receiving a JSON file and parsing through that to get the searchURI link, as well as a link to the song on spotify.
625941c230dc7b766590190a
def intersection(self, nums1, nums2): <NEW_LINE> <INDENT> res = [] <NEW_LINE> if nums1 == [] or nums2 == []: <NEW_LINE> <INDENT> return res <NEW_LINE> <DEDENT> for i in nums1: <NEW_LINE> <INDENT> if i in nums2 and i not in res: <NEW_LINE> <INDENT> res.append(i) <NEW_LINE> <DEDENT> <DEDENT> return res
:type nums1: List[int] :type nums2: List[int] :rtype: List[int]
625941c238b623060ff0ad90
def _calculate_md5_handler(data): <NEW_LINE> <INDENT> function_call = utils.generate_md5_hash_from_string <NEW_LINE> try: <NEW_LINE> <INDENT> payload_value = _data_handler(function_call, data, required_keys=(enums.DATA_KEY,)) <NEW_LINE> return {enums.MD5_KEY: payload_value}, enums.HTTP_SUCCESS <NEW_LINE> <DEDENT> except Exception as err: <NEW_LINE> <INDENT> return _data_error_handler(function_call, err)
Default handler to calculate md5 requests Receives JSON data and returns a tuple If successful, the tuple contains a dict {'md5': hash} and a int 200 Will return a dict {'error': error_message} and a int 500 otherwise
625941c221bff66bcd6848f7
def _update_range(self, data, **kwargs): <NEW_LINE> <INDENT> self._client.update_range(data=data, **kwargs)
Update range with data Args: data (bytes): data.
625941c291f36d47f21ac493
def exception(self, msg, *args, **kwargs): <NEW_LINE> <INDENT> if self.isEnabledFor(logging.ERROR): <NEW_LINE> <INDENT> msg, kwargs = self.process(msg, kwargs) <NEW_LINE> kwargs["exc_info"] = 1 <NEW_LINE> self.logger._log(logging.ERROR, _Message(msg, args), (), **kwargs)
Log an ERROR message with exception traceback.
625941c273bcbd0ca4b2c018
def re100_geothermal_both_nopv(context): <NEW_LINE> <INDENT> re100_geothermal_both(context) <NEW_LINE> newlist = [g for g in context.generators if not isinstance(g, generators.PV)] <NEW_LINE> context.generators = newlist
100% renewables plus geothermal, but no CST. >>> class C: pass >>> c = C() >>> re100_geothermal_both_nopv(c) >>> for g in c.generators: assert not isinstance(g, generators.PV)
625941c2d6c5a10208143feb
def handleStatus(self): <NEW_LINE> <INDENT> isAlive = False <NEW_LINE> if self.restServerProcess: <NEW_LINE> <INDENT> isAlive = self.restServerProcess.is_alive() <NEW_LINE> if not isAlive: <NEW_LINE> <INDENT> isAlive = False <NEW_LINE> <DEDENT> <DEDENT> return RESTServerStatus(self.state, isAlive=isAlive)
The handler for an incoming Status message.
625941c27b25080760e393fc
def _del_user(self, command): <NEW_LINE> <INDENT> if len(command) != 2: <NEW_LINE> <INDENT> self._command_not_found() <NEW_LINE> return <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> self._fs.del_user(command[1]) <NEW_LINE> <DEDENT> except (ValueError, PermissionError) as e: <NEW_LINE> <INDENT> print(e)
Удалить пользователя
625941c26aa9bd52df036d45
def list( self, **kwargs ): <NEW_LINE> <INDENT> cls = kwargs.pop('cls', None) <NEW_LINE> error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } <NEW_LINE> error_map.update(kwargs.pop('error_map', {})) <NEW_LINE> api_version = "2019-12-01" <NEW_LINE> accept = "application/json" <NEW_LINE> def prepare_request(next_link=None): <NEW_LINE> <INDENT> header_parameters = {} <NEW_LINE> header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') <NEW_LINE> if not next_link: <NEW_LINE> <INDENT> url = self.list.metadata['url'] <NEW_LINE> path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') <NEW_LINE> request = self._client.get(url, query_parameters, header_parameters) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> url = next_link <NEW_LINE> query_parameters = {} <NEW_LINE> request = self._client.get(url, query_parameters, header_parameters) <NEW_LINE> <DEDENT> return request <NEW_LINE> <DEDENT> def extract_data(pipeline_response): <NEW_LINE> <INDENT> deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response) <NEW_LINE> list_of_elem = deserialized.value <NEW_LINE> if cls: <NEW_LINE> <INDENT> list_of_elem = cls(list_of_elem) <NEW_LINE> <DEDENT> return deserialized.next_link or None, iter(list_of_elem) <NEW_LINE> <DEDENT> def get_next(next_link=None): <NEW_LINE> <INDENT> request = prepare_request(next_link) <NEW_LINE> pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) <NEW_LINE> response = pipeline_response.http_response <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> map_error(status_code=response.status_code, response=response, error_map=error_map) 
<NEW_LINE> raise HttpResponseError(response=response, error_format=ARMErrorFormat) <NEW_LINE> <DEDENT> return pipeline_response <NEW_LINE> <DEDENT> return ItemPaged( get_next, extract_data )
Lists all the VpnServerConfigurations in a subscription. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.ListVpnServerConfigurationsResult] :raises: ~azure.core.exceptions.HttpResponseError
625941c21f5feb6acb0c4af5
def test_portlet_login(self): <NEW_LINE> <INDENT> pass
Test that login portlet is patched.
625941c2956e5f7376d70e10
def __call__(self, xpl, rngpl, nnz): <NEW_LINE> <INDENT> tmp = self.sess.run([self.GetPairs], feed_dict = {self.xyzs_pl:xpl,self.rng_pl:rngpl,self.nnz_pl: nnz}) <NEW_LINE> return tmp[0]
Returns the nonzero pairs.
625941c230dc7b766590190b
def release(): <NEW_LINE> <INDENT> i18n() <NEW_LINE> templates() <NEW_LINE> if os.path.isdir("out") == False: <NEW_LINE> <INDENT> os.mkdir("out") <NEW_LINE> <DEDENT> shutil.copytree("src", "out/src") <NEW_LINE> with open("src/manifest.json", "r") as manifest: <NEW_LINE> <INDENT> manifestData = json.load(manifest) <NEW_LINE> del manifestData["key"] <NEW_LINE> manifestData["name"] = re.sub(r"\sDEV", "", manifestData["name"]) <NEW_LINE> manifestData["short_name"] = re.sub(r"\sDEV", "", manifestData["short_name"]) <NEW_LINE> with open("out/src/manifest.json", "w") as builtManifest: <NEW_LINE> <INDENT> json.dump(manifestData, builtManifest, indent=4) <NEW_LINE> <DEDENT> <DEDENT> with open("build/config.cws.json") as file: <NEW_LINE> <INDENT> releaseConfig = json.load(file) <NEW_LINE> rebuildConfig(config_file_path="out/src/config.js", tweak_map=releaseConfig) <NEW_LINE> <DEDENT> shutil.make_archive("release", "zip", root_dir="out/src") <NEW_LINE> shutil.rmtree("out/src") <NEW_LINE> shutil.move("release.zip", "out/release.zip")
Builds release ZIP for Chrome Web Store
625941c20383005118ecf586
def enable_notifications(self, characteristic, enable=True, indication=False): <NEW_LINE> <INDENT> return True
Enable/disable notifications or indications for a given characteristic :param characteristic: BluetoothGattCharacteristic Java object :param enable: enable notifications if True, else disable notifications :param indication: handle indications instead of notifications :return: True, if the operation was initiated successfully
625941c23539df3088e2e2ed
def _get_side_from_slot(player_slot): <NEW_LINE> <INDENT> return "radiant" if player_slot < 128 else "dire"
Get player team based on player slot
625941c29c8ee82313fbb716
def BuildbyPoints(self, request, context): <NEW_LINE> <INDENT> context.set_code(grpc.StatusCode.UNIMPLEMENTED) <NEW_LINE> context.set_details('Method not implemented!') <NEW_LINE> raise NotImplementedError('Method not implemented!')
index RPC service
625941c215baa723493c3f16
def testLearnMajority(self): <NEW_LINE> <INDENT> batch_size = 16 <NEW_LINE> sequence_length = 7 <NEW_LINE> train_steps = 200 <NEW_LINE> eval_steps = 20 <NEW_LINE> cell_type = 'lstm' <NEW_LINE> cell_size = 4 <NEW_LINE> optimizer_type = 'Momentum' <NEW_LINE> learning_rate = 2.0 <NEW_LINE> momentum = 0.9 <NEW_LINE> accuracy_threshold = 0.9 <NEW_LINE> def get_majority_input_fn(batch_size, sequence_length, seed=None): <NEW_LINE> <INDENT> tf.set_random_seed(seed) <NEW_LINE> def input_fn(): <NEW_LINE> <INDENT> random_sequence = tf.random_uniform( [batch_size, sequence_length], 0, 2, dtype=tf.int32, seed=seed) <NEW_LINE> inputs = tf.expand_dims(tf.to_float(random_sequence), 2) <NEW_LINE> labels = tf.to_int32( tf.squeeze( tf.reduce_sum( inputs, reduction_indices=[1]) > (sequence_length / 2.0))) <NEW_LINE> return {'inputs': inputs}, labels <NEW_LINE> <DEDENT> return input_fn <NEW_LINE> <DEDENT> seq_columns = [tf.contrib.layers.real_valued_column( 'inputs', dimension=cell_size)] <NEW_LINE> config = tf.contrib.learn.RunConfig(tf_random_seed=77) <NEW_LINE> sequence_classifier = dynamic_rnn_estimator.single_value_rnn_classifier( num_classes=2, num_units=cell_size, sequence_feature_columns=seq_columns, cell_type=cell_type, optimizer_type=optimizer_type, learning_rate=learning_rate, momentum=momentum, config=config) <NEW_LINE> train_input_fn = get_majority_input_fn(batch_size, sequence_length, 1111) <NEW_LINE> eval_input_fn = get_majority_input_fn(batch_size, sequence_length, 2222) <NEW_LINE> sequence_classifier.fit(input_fn=train_input_fn, steps=train_steps) <NEW_LINE> evaluation = sequence_classifier.evaluate( input_fn=eval_input_fn, steps=eval_steps) <NEW_LINE> accuracy = evaluation['accuracy'] <NEW_LINE> self.assertGreater(accuracy, accuracy_threshold, 'Accuracy should be higher than {}; got {}'.format( accuracy_threshold, accuracy))
Test that `_SequenceClassifier` can learn the 'majority' function.
625941c2462c4b4f79d1d673
def ndr_pad(string): <NEW_LINE> <INDENT> return "\x00" * ((4 - (len(string) & 3)) & 3)
Pad an NDR.
625941c2d18da76e23532476
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=False, n_jobs=None): <NEW_LINE> <INDENT> if not isinstance(X, KNeighborsMixin): <NEW_LINE> <INDENT> X = NearestNeighbors(n_neighbors, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs).fit(X) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> _check_params(X, metric, p, metric_params) <NEW_LINE> <DEDENT> query = _query_include_self(X, include_self) <NEW_LINE> return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
Computes the (weighted) graph of k-Neighbors for points in X Read more in the :ref:`User Guide <unsupervised_neighbors>`. Parameters ---------- X : array-like or BallTree, shape = [n_samples, n_features] Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. n_neighbors : int Number of neighbors for each sample. mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, and 'distance' will return the distances between neighbors according to the given metric. metric : string, default 'minkowski' The distance metric used to calculate the k-Neighbors for each sample point. The DistanceMetric class gives a list of available metrics. The default distance is 'euclidean' ('minkowski' metric with the p param equal to 2.) p : int, default 2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params : dict, optional additional keyword arguments for the metric function. include_self : bool, default=False. Whether or not to mark each sample as the first nearest neighbor to itself. If `None`, then True is used for mode='connectivity' and False for mode='distance' as this will preserve backwards compatibility. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import kneighbors_graph >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True) >>> A.toarray() array([[1., 0., 1.], [0., 1., 1.], [1., 0., 1.]]) See also -------- radius_neighbors_graph
625941c2a934411ee3751636
def multiply(self, num1, num2):
    """Multiply two non-negative integers given as decimal strings.

    Implements grade-school long multiplication on a digit array, so the
    result is exact regardless of operand length.

    :type num1: str
    :type num2: str
    :rtype: str  -- the product without leading zeros
    """
    # Short-circuit zero so the general path never has to strip an
    # all-zero result down to a single '0'.
    if num1 == '0' or num2 == '0':
        return '0'
    n1, n2 = len(num1), len(num2)
    # digits[k] accumulates the coefficient of 10**k (least significant
    # first); n1 + n2 positions always suffice for the product.
    digits = [0] * (n1 + n2)
    for i in range(n1 - 1, -1, -1):
        d1 = int(num1[i])
        for j in range(n2 - 1, -1, -1):
            # Only in-range positions are touched, unlike a scan that
            # tests every (i, j) pair against the output index.
            digits[(n1 - 1 - i) + (n2 - 1 - j)] += d1 * int(num2[j])
    # Single carry-propagation pass normalizes every position to 0-9.
    carry = 0
    for k in range(len(digits)):
        total = digits[k] + carry
        digits[k] = total % 10
        carry = total // 10
    # Render most-significant first; at most one leading zero can remain
    # (and the product is nonzero here, so lstrip never empties it).
    return ''.join(str(d) for d in reversed(digits)).lstrip('0')
:type num1: str :type num2: str :rtype: str
625941c28a349b6b435e8116
def __rsub__(self, an_angle):
    """Right-hand subtraction, as in ``int - Angle`` or ``float - Angle``.

    Same contract as ``__sub__`` except that *self* is the second operand;
    implemented as ``(-self) + an_angle``.
    """
    flipped = Angle([-coeff for coeff in self.coefficients])
    return flipped + an_angle
Specification as for __sub__, except that self is the second operand, as in ``int - Angle`` or ``float - Angle``
625941c2a17c0f6771cbdff5
def normalize(self, norm, t=0.):
    """Normalize the potential's amplitude.

    Scales ``self._amp`` so that the radial force at (R=1, z=0) equals
    ``norm`` times the force needed for vc(R=1, z=0)=1 (``norm=1`` gives
    the full normalization).

    :param norm: target fraction of the unit-circular-velocity force
    :param t: time at which to evaluate the force (default 0.)
    """
    reference_force = nu.fabs(self.Rforce(1., 0., t=t))
    # Same evaluation order as `amp *= norm / force` to keep FP results identical.
    self._amp = self._amp * (norm / reference_force)
NAME: normalize PURPOSE: normalize a potential in such a way that vc(R=1,z=0)=1., or a fraction of this INPUT: norm - normalize such that Rforce(R=1,z=0) is such that it is 'norm' of the force necessary to make vc(R=1,z=0)=1 (if True, norm=1) OUTPUT: (none) HISTORY: 2010-07-10 - Written - Bovy (NYU)
625941c276e4537e8c351613
def parse_topic(address):
    """Parse the articles of a topic page.

    :param address: URL of the topic page
    :return: list of dicts with 'url', 'title' and 'time' per article
    """
    soup = BeautifulSoup(requests.get(address).text, 'lxml')
    items = soup.find_all('div', {'class': 'item item_story-single js-story-item'})
    parsed = []
    for item in items:
        link = item.find('a', {'class': 'item__link no-injects js-yandex-counter'})
        published = item.find('span', {'class': 'item__info'}).text.strip()
        parsed.append({
            'url': link['href'].strip(),
            'title': item.find('span', {'class': 'item__title'}).text.strip(),
            # dateparser handles the site's human-readable timestamps.
            'time': dateparser.parse(published),
        })
    return parsed
Parse the articles in a topic :param address: address of the topic page :return: list of parsed articles
625941c2dd821e528d63b14d
def krmtcng_usetable_vacuum(self, logger_body):
    """Run VACUUM ANALYZE on the transaction table and tm_005_krmtcng.

    :param logger_body: logger that records the SQL statements and the
        exception details when the vacuum fails (best-effort: errors are
        logged, not re-raised)
    """
    vacuum_tr_table = "VACUUM ANALYZE " + self.tr_tablename
    vacuum_krmtcng = "VACUUM ANALYZE tm_005_krmtcng"
    try:
        common.database.Database.vacuumDb(self, vacuum_tr_table)
        common.database.Database.vacuumDb(self, vacuum_krmtcng)
        common.database.Database.commitDb(self)
    except Exception as e:
        logger_body.info(vacuum_tr_table)
        # BUG FIX: was `vacuum_krskcng`, an undefined name that raised
        # NameError inside the handler and masked the original failure.
        logger_body.info(vacuum_krmtcng)
        logger_body.info(str(type(e)))
        logger_body.info(str(e))
Run VACUUM on the transaction table and the storage-destination conversion master table
625941c2a8370b7717052843
def TcToNum(s):
    """Convert an 8-bit two's-complement binary string to a signed decimal.

    Returns 0 for the empty string and '' for any string whose length is
    not exactly 8 (the original error convention, preserved for callers).
    """
    if s == "":
        return 0
    # Merged the separate len>8 / len<8 branches: both returned ''.
    if len(s) != 8:
        return ""
    # Convert once instead of re-converting in each branch.
    unsigned = Binarytonum(s)
    if unsigned >= 128:
        # High bit set -> negative; (255 - v + 1) * -1 simplifies to v - 256.
        return unsigned - 256
    return unsigned
Converts an 8-bit two's-complement binary string to its signed decimal value; returns 0 for the empty string and '' for any input not exactly 8 bits long
625941c2d268445f265b4e11
def set_size(self, size):
    """Change the size, then re-apply the current center.

    Re-invoking set_center() with the existing center presumably refreshes
    geometry that depends on the new size -- confirm in set_center.
    """
    self.size = size
    self.set_center(self.center)
Change size, then re-apply the current center via set_center().
625941c2eab8aa0e5d26dafa
def test_positive_foreman_module(self):
    """Check if SELinux foreman module has the right version

    :id: a0736b3a-3d42-4a09-a11a-28c1d58214a5

    :expectedresults: Foreman RPM and SELinux module versions match
    """
    # Both probe commands must succeed before their output is compared.
    rpm_out = ssh.command('rpm -q foreman-selinux')
    self.assertEqual(rpm_out.return_code, 0)
    sem_out = ssh.command('semodule -l | grep foreman')
    self.assertEqual(sem_out.return_code, 0)
    pkg_version = self.version_regex.search(
        ''.join(rpm_out.stdout)).group(1)
    module_version = self.version_regex.search(
        ''.join(sem_out.stdout)).group(1)
    # Trim the two-character release suffix and normalise the separator
    # before comparing against the SELinux module version.
    trimmed = pkg_version[:-2].replace('-', '.')
    self.assertEqual(trimmed, module_version)
Check if SELinux foreman module has the right version :id: a0736b3a-3d42-4a09-a11a-28c1d58214a5 :expectedresults: Foreman RPM and SELinux module versions match
625941c2a219f33f3462890f
def __init__(self, Trange=(0.0, 0.0), coeffs=[], p0=-1.0):
    """Nine-coefficient NASA polynomial parameterization.

    :param Trange: two-element sequence giving the valid temperature range
    :param coeffs: the nine coefficients (a0, ..., a8)
    :param p0: reference-state pressure; -1.0 means "use the global default"
    """
    self._t = Trange
    self._pref = p0
    # The NASA9 form requires exactly nine coefficients.
    if len(coeffs) == 9:
        self._coeffs = coeffs
    else:
        raise CTI_Error('NASA9 coefficient list must have length = 9')
:param Trange: The temperature range over which the parameterization is valid. This must be entered as a sequence of two temperature values. Required. :param coeffs: List of nine coefficients :math:`(a_0, \ldots , a_8)` :param p0: The reference-state pressure, usually 1 atm or 1 bar. If omitted, the default value is used, which is set by the ``standard_pressure`` directive.
625941c230bbd722463cbd67
def Read(self, data, address = None, size = None):
    """Read bytes from the remote process's memory block into *data*.

    :param data: a ctypes object that receives the bytes read
    :param address: remote address to read from; falsy means this block's
        base address (self.memAddress); ctypes values are unwrapped
    :param size: number of bytes to read; falsy means ctypes.sizeof(data)
    :returns: *data*, filled in
    :raises Exception: if the requested size exceeds this block, or the
        address looks invalid (top of the address range)
    :raises WindowsError: via ctypes.WinError() if ReadProcessMemory fails
        twice with anything other than ERROR_PARTIAL_COPY
    """
    # Default to this block's own base address; unwrap ctypes wrappers.
    if not address:
        address = self.memAddress
    if hasattr(address, 'value'):
        address = address.value
    if size:
        nSize = win32structures.ULONG_PTR(size)
    else:
        nSize = win32structures.ULONG_PTR(ctypes.sizeof(data))
    # Refuse to read past the end of the allocated remote block.
    if self.size < nSize.value:
        raise Exception('Read: RemoteMemoryBlock is too small (' + str(self.size) + ' bytes), ' + str(nSize.value) + ' is required.')
    # Addresses at the very top of the range are treated as bogus.
    if hex(address).lower().startswith('0xffffff'):
        raise Exception('Read: RemoteMemoryBlock has incorrect address =' + hex(address))
    lpNumberOfBytesRead = ctypes.c_size_t(0)
    ret = win32functions.ReadProcessMemory(
        ctypes.c_void_p(self.process),
        ctypes.c_void_p(address),
        ctypes.byref(data),
        nSize,
        ctypes.byref(lpNumberOfBytesRead))
    if ret == 0:
        # First attempt failed: retry once before diagnosing further.
        ret = win32functions.ReadProcessMemory(
            ctypes.c_void_p(self.process),
            ctypes.c_void_p(address),
            ctypes.byref(data),
            nSize,
            ctypes.byref(lpNumberOfBytesRead))
        if ret == 0:
            last_error = win32api.GetLastError()
            if last_error != win32defines.ERROR_PARTIAL_COPY:
                # Hard failure: dump diagnostics and raise the Win32 error.
                print('\nError: Read: WARNING! self.memAddress =', self.memAddress, ' data address =', ctypes.byref(data))
                print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())
                print('lpNumberOfBytesRead =', lpNumberOfBytesRead, ' nSize =', nSize)
                print('Caller stack:')
                for frame in inspect.stack():
                    print(frame[1:])
                print()
                sys.stdout.flush()
                raise ctypes.WinError()
            else:
                # Partial copy: warn loudly but continue (best-effort read).
                print('Error: ERROR_PARTIAL_COPY')
                print('\nRead: WARNING! self.memAddress =', self.memAddress, ' data address =', ctypes.byref(data))
                print('lpNumberOfBytesRead =', lpNumberOfBytesRead, ' nSize =', nSize)
                print('Caller stack:')
                for frame in inspect.stack():
                    print('\t\t', frame[1:])
                print()
                sys.stdout.flush()
        else:
            print('Read OK: 2nd attempt!')
    # Verify the guard bytes around the block were not clobbered.
    self.CheckGuardSignature()
    return data
Read data from the memory block
625941c21d351010ab855abf
def __init__(self, name: str):
    """Store the command's name.

    :param name: command name
    """
    self.name = name
:param name: command name
625941c2f8510a7c17cf969e
def rotate_coordinates(self, in_x, in_y, in_z):
    """Apply the pre-set rotation matrix to the given coordinates.

    Transforms box coordinates into lattice coordinates; when rotation is
    disabled in ``self.pargs`` the input is returned unchanged.
    """
    # Guard clause: no rotation configured -> identity transform.
    if not self.pargs['rotation']:
        return in_x, in_y, in_z
    rot = self.rotation_matrix
    out_x = rot[0][0] * in_x + rot[0][1] * in_y + rot[0][2] * in_z
    out_y = rot[1][0] * in_x + rot[1][1] * in_y + rot[1][2] * in_z
    out_z = rot[2][0] * in_x + rot[2][1] * in_y + rot[2][2] * in_z
    return out_x, out_y, out_z
Transforms from box coordinates into lattice coordinates, based on the pre-set rotation values.
625941c2b830903b967e98b0