Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
371,700
# Parse `priorstring` and return a prior-specification tuple.
# Expected shape: "<name> c1 c2" -> (<name>, c1, c2) with c1, c2 > 0.
# NOTE(review): this line was flattened by the dataset export and string
# literals were stripped (the value compared against prior[0] and the first
# element of the returned tuple), so it is not valid Python as-is; restore
# the missing literals from the original source.
# NOTE(review): the assert message says "> 1" but the check is "> 0" — confirm.
def diffPrefsPrior(priorstring): assert isinstance(priorstring, str) prior = priorstring.split() if len(prior) == 3 and prior[0] == : [c1, c2] = [float(x) for x in prior[1 : ]] assert c1 > 0 and c2 > 0, "C1 and C2 must be > 1 for invquadratic prior" return (, c1, c2) else: raise ValueError("Invalid diffprefsprior: {0}".format(priorstring))
Parses `priorstring` and returns `prior` tuple.
371,701
def spherical_sum(image, binning_factor=1.0):
    """Sum image values over concentric annuli.

    Parameters
    ----------
    image : `DiscreteLp` element
        Input data whose radial sum should be computed.
    binning_factor : positive float, optional
        Reduce the number of output bins by this factor; larger values
        smooth out fluctuations from the varying number of points per
        annulus.

    Returns
    -------
    spherical_sum : 1D `DiscreteLp` element
        Radial sum of ``image`` on the domain ``[0, rmax]``, where
        ``rmax`` is the largest corner distance of ``image.space.domain``.
    """
    # Radial distance of every grid point from the origin.
    radii = np.sqrt(sum(axis ** 2 for axis in image.space.meshgrid))
    rmax = max(np.linalg.norm(corner) for corner in image.space.domain.corners())
    # Number of bins scales with the image diagonal (in pixels).
    diagonal = np.sqrt(sum(n ** 2 for n in image.shape))
    n_bins = int(diagonal / binning_factor)
    binned, _ = np.histogram(radii, weights=image, bins=n_bins, range=(0, rmax))
    radial_space = uniform_discr(
        min_pt=0, max_pt=rmax, shape=n_bins,
        impl=image.space.impl, dtype=image.space.dtype,
        interp="linear", axis_labels=["$r$"])
    return radial_space.element(binned)
Sum image values over concentric annuli. Parameters ---------- image : `DiscreteLp` element Input data whose radial sum should be computed. binning_factor : positive float, optional Reduce the number of output bins by this factor. Increasing this number can help reducing fluctuations due to the variance of points that fall in a particular annulus. A binning factor of ``1`` corresponds to a bin size equal to image pixel size for images with square pixels, otherwise :: max(norm2(c)) / norm2(shape) where the maximum is taken over all corners of the image domain. Returns ------- spherical_sum : 1D `DiscreteLp` element The spherical sum of ``image``. Its space is one-dimensional with domain ``[0, rmax]``, where ``rmax`` is the radius of the smallest ball containing ``image.space.domain``. Its shape is ``(N,)`` with :: N = int(sqrt(sum(n ** 2 for n in image.shape)) / binning_factor)
371,702
def find_host_network_interface_by_id(self, id_p):
    """Search all host network interfaces for the one with the given GUID.

    in id_p of type str
        GUID of the host network interface to search for.

    return network_interface of type :class:`IHostNetworkInterface`
        Found host network interface object; the backend reports an
        error when the GUID matches no interface.
    """
    if not isinstance(id_p, basestring):
        raise TypeError("id_p can only be an instance of type basestring")
    raw_result = self._call("findHostNetworkInterfaceById", in_p=[id_p])
    return IHostNetworkInterface(raw_result)
Searches through all host network interfaces for an interface with the given GUID. The method returns an error if the given GUID does not correspond to any host network interface. in id_p of type str GUID of the host network interface to search for. return network_interface of type :class:`IHostNetworkInterface` Found host network interface object.
371,703
# Override of Gtk.Widget's init_template: calls the base implementation,
# then binds each template child as an instance attribute, warning when a
# declared child is missing from the template.
# NOTE(review): flattened by the dataset export — the statement building
# ``errmsg`` was stripped (only its trailing ``"but was not present in
# template") % name`` fragment remains), so this line is not valid Python
# as-is; restore the missing assignment from the original source.
def _init_template(self, cls, base_init_template): if self.__class__ is not cls: raise TypeError("Inheritance from classes with @GtkTemplate decorators " "is not allowed at this time") connected_signals = set() self.__connected_template_signals__ = connected_signals base_init_template(self) for name in self.__gtemplate_widgets__: widget = self.get_template_child(cls, name) self.__dict__[name] = widget if widget is None: "but was not present in template") % name warnings.warn(errmsg, GtkTemplateWarning)
This would be better as an override for Gtk.Widget
371,704
# Load one or more raw data files; consecutive files are merged into a
# single test, then the dataset is optionally sorted and appended to
# ``self.datasets``.
# NOTE(review): flattened to a single line by the dataset export; the
# original indentation (and therefore the exact nesting of the branches)
# is not recoverable here, so the code is kept byte-identical.
# BUG(review): ``self.logging.debug(...)`` near the end should almost
# certainly be ``self.logger.debug(...)`` — every other call in this
# method uses ``self.logger``; confirm against the original source.
def from_raw(self, file_names=None, **kwargs): if file_names: self.file_names = file_names if not isinstance(file_names, (list, tuple)): self.file_names = [file_names, ] raw_file_loader = self.loader set_number = 0 test = None counter = 0 self.logger.debug("start iterating through file(s)") for f in self.file_names: self.logger.debug("loading raw file:") self.logger.debug(f"{f}") new_tests = raw_file_loader(f, **kwargs) if new_tests: if test is not None: self.logger.debug("continuing reading files...") _test = self._append(test[set_number], new_tests[set_number]) if not _test: self.logger.warning(f"EMPTY TEST: {f}") continue test[set_number] = _test self.logger.debug("added this test - started merging") for j in range(len(new_tests[set_number].raw_data_files)): raw_data_file = new_tests[set_number].raw_data_files[j] file_size = new_tests[set_number].raw_data_files_length[j] test[set_number].raw_data_files.append(raw_data_file) test[set_number].raw_data_files_length.append(file_size) counter += 1 if counter > 10: self.logger.debug("ERROR? Too many files to merge") raise ValueError("Too many files to merge - " "could be a p2-p3 zip thing") else: self.logger.debug("getting data from first file") if new_tests[set_number].no_data: self.logger.debug("NO DATA") else: test = new_tests else: self.logger.debug("NOTHING LOADED") self.logger.debug("finished loading the raw-files") test_exists = False if test: if test[0].no_data: self.logging.debug("the first dataset (or only dataset) loaded from the raw data file is empty") else: test_exists = True if test_exists: if not prms.Reader.sorted_data: self.logger.debug("sorting data") test[set_number] = self._sort_data(test[set_number]) self.datasets.append(test[set_number]) else: self.logger.warning("No new datasets added!") self.number_of_datasets = len(self.datasets) self.status_datasets = self._validate_datasets() self._invent_a_name() return self
Load a raw data-file. Args: file_names (list of raw-file names): uses CellpyData.file_names if None. If the list contains more than one file name, then the runs will be merged together.
371,705
def init_session(self):
    """Create a fresh session object for issuing requests.

    Any previously open session is closed before the new one is built
    from the stored credentials and extra headers.
    """
    existing = self.session
    if existing:
        existing.close()
    self.session = make_session(
        self.username,
        self.password,
        self.bearer_token,
        self.extra_headers_dict,
    )
Defines a session object for passing requests.
371,706
# CLARANS-style neighborhood search: repeatedly try replacing a randomly
# chosen current medoid with a random non-medoid candidate, accepting the
# swap (and restarting the neighbor counter) whenever the accumulated cost
# change ``candidate_cost`` is negative.
# NOTE(review): flattened to one line by the dataset export; the nesting of
# the if/elif branches cannot be restored with certainty, so the code is
# kept byte-identical here.
def __optimize_configuration(self): index_neighbor = 0 while (index_neighbor < self.__maxneighbor): current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)] current_medoid_cluster_index = self.__belong[current_medoid_index] candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1) while candidate_medoid_index in self.__current: candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1) candidate_cost = 0.0 for point_index in range(0, len(self.__pointer_data)): if point_index not in self.__current: point_cluster_index = self.__belong[point_index] point_medoid_index = self.__current[point_cluster_index] other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index) other_medoid_cluster_index = self.__belong[other_medoid_index] distance_current = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index]) distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index]) distance_nearest = float() if ( (point_medoid_index != candidate_medoid_index) and (point_medoid_index != current_medoid_cluster_index) ): distance_nearest = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[point_medoid_index]) if (point_cluster_index == current_medoid_cluster_index): if (distance_candidate >= distance_nearest): candidate_cost += distance_nearest - distance_current else: candidate_cost += distance_candidate - distance_current elif (point_cluster_index == other_medoid_cluster_index): if (distance_candidate > distance_nearest): pass; else: candidate_cost += distance_candidate - distance_nearest if (candidate_cost < 0): self.__current[current_medoid_cluster_index] = candidate_medoid_index self.__update_clusters(self.__current) index_neighbor = 0 else: index_neighbor += 1
! @brief Finds quasi-optimal medoids and updates the clusters accordingly, in line with the algorithm's rules.
371,707
# Compute per-label and overall ('_all') accuracy, reading either from CSV
# files or from BigQuery, and return the results as a DataFrame.
# NOTE(review): the dataset export stripped the string literals (the
# target/predicted column names, the result-row dict keys, and the two
# BigQuery query templates), so this line is not valid Python as-is;
# restore them from the original source.
def accuracy(self): if self._input_csv_files: df = self._get_data_from_csv_files() if not in df or not in df: raise ValueError() labels = sorted(set(df[]) | set(df[])) accuracy_results = [] for label in labels: correct_count = len(df[(df[] == df[]) & (df[] == label)]) total_count = len(df[(df[] == label)]) accuracy_results.append({ : label, : float(correct_count) / total_count if total_count > 0 else 0, : total_count }) total_correct_count = len(df[(df[] == df[])]) if len(df) > 0: total_accuracy = float(total_correct_count) / len(df) accuracy_results.append({: , : total_accuracy, : len(df)}) return pd.DataFrame(accuracy_results) elif self._bigquery: query = bq.Query( % self._bigquery) query_all = bq.Query( % self._bigquery) df = self._get_data_from_bigquery([query, query_all]) return df
Get accuracy numbers for each target and overall. Returns: A DataFrame with two columns: 'class' and 'accuracy'. It also contains the overall accuracy with class being '_all'. Raises: Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery does not return 'target' or 'predicted' column.
371,708
# Build the context dict for a card showing the Creators associated with
# the most Works, optionally filtered by work kind and role name.
# NOTE(review): the dataset export stripped the string literals (the
# chartify attribute name, the name/title format strings, and the dict
# keys of the returned context), so this line is not valid Python as-is;
# restore them from the original source.
def most_seen_creators_by_works_card(work_kind=None, role_name=None, num=10): object_list = most_seen_creators_by_works( work_kind=work_kind, role_name=role_name, num=num) object_list = chartify(object_list, , cutoff=1) if role_name: creators_name = .format(role_name.capitalize()) else: creators_name = if work_kind: works_name = Work.get_kind_name_plural(work_kind).lower() else: works_name = card_title = .format(creators_name, works_name) return { : card_title, : , : object_list, }
Displays a card showing the Creators that are associated with the most Works. e.g.: {% most_seen_creators_by_works_card work_kind='movie' role_name='Director' num=5 %}
371,709
# Normalize a predicate string: strip surrounding quotes and the `_rel`
# suffix, and lowercase the result (see the paired docstring examples).
# NOTE(review): the export garbled this line — the condition after
# ``predstr.lstrip(`` runs into the join/return expression and closing
# quotes/parentheses are missing; not valid Python as-is. Restore from the
# original source.
def normalize_pred_string(predstr): tokens = [t for t in split_pred_string(predstr)[:3] if t is not None] if predstr.lstrip("__'.join(tokens).lower()
Normalize the predicate string *predstr* to a conventional form. This makes predicate strings more consistent by removing quotes and the `_rel` suffix, and by lowercasing them. Examples: >>> normalize_pred_string('"_dog_n_1_rel"') '_dog_n_1' >>> normalize_pred_string('_dog_n_1') '_dog_n_1'
371,710
# Register middleware (or a router, or an iterable of middleware) to run on
# requests matching ``path``; usable directly or as a decorator.
# NOTE(review): the dataset export stripped string literals (the attribute
# names passed to hasattr/getattr and the default ``path`` value), so this
# line is not valid Python as-is; restore them from the original source.
def use(self, middleware=None, path=, method_mask=HTTPMethod.ALL): if middleware is None: return lambda mw: self.use(mw, path, method_mask) if hasattr(middleware, ): router = getattr(middleware, ) if isinstance(router, (types.MethodType,)): router = router() self.add_router(path, router) elif isinstance(type(middleware), RouterMeta): router = middleware._RouterMeta__growler_router() self.add_router(path, router) elif hasattr(middleware, ): for mw in middleware: self.use(mw, path, method_mask) else: log.info("{} Using {} on path {}", id(self), middleware, path) self.middleware.add(path=path, func=middleware, method_mask=method_mask) return middleware
Use the middleware (a callable with parameters res, req, next) upon requests match the provided path. A None path matches every request. Returns the middleware so this method may be used as a decorator. Args: middleware (callable): A function with signature '(req, res)' to be called with every request which matches path. path (str or regex): Object used to test the requests path. If it matches, either by equality or a successful regex match, the middleware is called with the req/res pair. method_mask (Optional[HTTPMethod]): Filters requests by HTTP method. The HTTPMethod enum behaves as a bitmask, so multiple methods may be joined by `+` or `\|`, removed with `-`, or toggled with `^` (e.g. `HTTPMethod.GET + HTTPMethod.POST`, `HTTPMethod.ALL - HTTPMethod.DELETE`). Returns: Returns the provided middleware; a requirement for this method to be used as a decorator.
371,711
def set_offset_and_sequence_number(self, event_data):
    """Update the checkpoint position from a received event.

    :param event_data: A received EventData with valid offset and
     sequence_number attributes.
    :type event_data: ~azure.eventhub.common.EventData
    :raises ValueError: if ``event_data`` is falsy (nothing to read from).
    """
    if not event_data:
        # Raise a specific, descriptive error instead of the original bare
        # ``Exception(event_data)``, which carried the falsy value itself
        # as the message. ValueError subclasses Exception, so callers
        # catching Exception are unaffected.
        raise ValueError("event_data is required to update offset and sequence number")
    self.offset = event_data.offset.value
    self.sequence_number = event_data.sequence_number
Updates offset based on event. :param event_data: A received EventData with valid offset and sequenceNumber. :type event_data: ~azure.eventhub.common.EventData
371,712
def get_template_names(self):
    """Pick templates by request kind: a list template for ajax requests,
    otherwise whatever the parent view would use."""
    if not self.request.is_ajax():
        return super(Search, self).get_template_names()
    return [self.list_template_name]
Dispatch template according to the kind of request: ajax or normal.
371,713
# Resolve platform names and tags to <Platform> objects, honouring an
# exclusion list; "all" selects every non-excluded platform. Falls back to
# the full platform list when nothing matched.
# NOTE(review): flattened to one line by the dataset export; the nesting of
# the inner for/try blocks cannot be restored with certainty, so the code
# is kept byte-identical here.
# NOTE(review): mutable default arguments (platformNames=[], tags=[]) and a
# bare ``except:`` are smells worth fixing in the original source.
def getPlatformsByName(platformNames=[], mode=None, tags=[], excludePlatformNames=[]): allPlatformsList = getAllPlatformObjects(mode) platformList = [] if "all" in platformNames and len(tags) == 0: for plat in allPlatformsList: if str(plat.platformName).lower() not in excludePlatformNames: platformList.append(plat) return platformList else: for name in platformNames: if name not in excludePlatformNames: for plat in allPlatformsList: if name == str(plat.platformName).lower(): platformList.append(plat) break try: if name == str(plat.parameterName).lower(): platformList.append(plat) break except: pass for t in plat.tags: if t in tags: platformList.append(plat) break if platformList == []: return allPlatformsList else: return platformList
Method that recovers the names of the <Platforms> in a given list. :param platformNames: List of strings containing the possible platforms. :param mode: The mode of the search. The following can be chosen: ["phonefy", "usufy", "searchfy"]. :param tags: Just in case the method to select the candidates is a series of tags. :param excludePlatformNames: List of strings to be excluded from the search. :return: Array of <Platforms> classes.
371,714
def load_history(self, f):
    """Load the history of a ``NeuralNet`` from a json file.

    Deprecated; use ``load_params`` with the ``f_history`` keyword
    instead. See ``save_history`` for examples.

    Parameters
    ----------
    f : file-like object or str
    """
    deprecation_msg = (
        "load_history is deprecated and will be removed in the next "
        "release, please use load_params with the f_history keyword"
    )
    warnings.warn(deprecation_msg, DeprecationWarning)
    self.history = History.from_file(f)
Load the history of a ``NeuralNet`` from a json file. See ``save_history`` for examples. Parameters ---------- f : file-like object or str
371,715
# Respond to a schema request by sending all configurable components'
# schemata back to the requesting client; errors are logged, not raised.
# NOTE(review): the dict keys of ``response`` were stripped by the dataset
# export, so this line is not valid Python as-is; restore them from the
# original source.
def configuration(self, event): try: self.log("Schemarequest for all configuration schemata from", event.user.account.name, lvl=debug) response = { : , : , : configschemastore } self.fireEvent(send(event.client.uuid, response)) except Exception as e: self.log("ERROR:", e)
Return all configurable components' schemata
371,716
def set_paths(etc_paths=("/etc/",)):
    """Set the paths searched for configuration files.

    Multiple configuration directories may be given (e.g. /etc/default
    and /etc/appfolder/); each entry is expanded with
    ``os.path.expanduser`` so ``~`` works.

    :param etc_paths: iterable of directory paths to search.
    """
    global _ETC_PATHS
    # Tuple default avoids the shared-mutable-default pitfall of the
    # original ``etc_paths=[ "/etc/" ]`` signature; the loop-and-append
    # is replaced by a comprehension.
    _ETC_PATHS = [os.path.expanduser(p) for p in etc_paths]
Sets the paths where the configuration files will be searched * You can have multiple configuration files (e.g. in the /etc/default folder and in /etc/appfolder/)
371,717
def is_carrying_minerals(self) -> bool:
    """Checks if a worker or MULE is carrying (gold-)minerals."""
    mineral_buffs = (
        BuffId.CARRYMINERALFIELDMINERALS,
        BuffId.CARRYHIGHYIELDMINERALFIELDMINERALS,
    )
    active_buff_ids = self._proto.buff_ids
    return any(buff.value in active_buff_ids for buff in mineral_buffs)
Checks if a worker or MULE is carrying (gold-)minerals.
371,718
def map_tree(visitor, tree):
    """Recursively apply *visitor* to every node of *tree*, bottom-up.

    Children (``tree.nodes``) are transformed first; *visitor* then
    receives the node together with the list of transformed children,
    and its result replaces the node.
    """
    transformed_children = []
    for child in tree.nodes:
        transformed_children.append(map_tree(visitor, child))
    return visitor(tree, transformed_children)
Apply function to nodes
371,719
# Build an auto-retrying LCDd client and connect to it, exiting the process
# with status 1 on socket errors.
# NOTE(review): the first argument of ``logger.error(...)`` (the log format
# string) was stripped by the dataset export, so this line is not valid
# Python as-is; restore it from the original source.
def _make_lcdproc( lcd_host, lcd_port, retry_config, charset=DEFAULT_LCDPROC_CHARSET, lcdd_debug=False): class ServerSpawner(utils.AutoRetryCandidate): @utils.auto_retry def connect(self): return lcdrunner.LcdProcServer( lcd_host, lcd_port, charset=charset, debug=lcdd_debug) spawner = ServerSpawner(retry_config=retry_config, logger=logger) try: return spawner.connect() except socket.error as e: logger.error(, lcd_host, lcd_port, e) raise SystemExit(1)
Create and connect to the LCDd server. Args: lcd_host (str): the hostname to connect to lcd_prot (int): the port to connect to charset (str): the charset to use when sending messages to lcdproc lcdd_debug (bool): whether to enable full LCDd debug retry_attempts (int): the number of connection attempts retry_wait (int): the time to wait between connection attempts retry_backoff (int): the backoff for increasing inter-attempt delay Returns: lcdproc.server.Server
371,720
def anonymous_required(func=None, url=None):
    """Decorator requiring that the current user is NOT logged in.

    Authenticated users are redirected to *url* (default ``"/"``);
    anonymous users proceed to the wrapped view.  Usable both bare
    (``@anonymous_required``) and with arguments
    (``@anonymous_required(url=...)``).
    """
    target = url or "/"

    def decorate(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def guarded(request, *args, **kwargs):
            if not request.user.is_authenticated():
                return view_func(request, *args, **kwargs)
            return redirect(target)
        return guarded

    return decorate if func is None else decorate(func)
Required that the user is not logged in.
371,721
def remove_record(self, orcid_id, token, request_type, put_code):
    """Remove a record from a profile.

    Parameters
    ----------
    :param orcid_id: string
        Id of the author.
    :param token: string
        Token received from OAuth 2 3-legged authorization.
    :param request_type: string
        One of 'activities', 'education', 'employment', 'funding',
        'peer-review', 'work'.
    :param put_code: string
        The id of the record ('put-code'), as returned by a
        read_record_* call.
    """
    self._update_activities(
        orcid_id,
        token,
        requests.delete,
        request_type,
        put_code=put_code,
    )
Remove a record from a profile. Parameters ---------- :param orcid_id: string Id of the author. :param token: string Token received from OAuth 2 3-legged authorization. :param request_type: string One of 'activities', 'education', 'employment', 'funding', 'peer-review', 'work'. :param put_code: string The id of the record. Can be retrieved using read_record_* method. In the result of it, it will be called 'put-code'.
371,722
def get_all_objects(self): "Return pointers to all GC tracked objects" for i, generation in enumerate(self.gc_generations): generation_head_ptr = pygc_head_ptr = generation.head.get_pointer() generation_head_addr = generation_head_ptr._value while True: pygc_head_ptr = pygc_head_ptr.deref().gc_next if pygc_head_ptr._value == generation_head_addr: break yield pygc_head_ptr.deref().get_object_ptr()
Return pointers to all GC tracked objects
371,723
# Convert a separated string into a list of substrings and cast it to the
# configured container type, cleaning entries along the way.
# NOTE(review): the attribute-name literals passed to the two ``hasattr``
# calls were stripped by the dataset export, so this line is not valid
# Python as-is; restore them from the original source.
def native(self, value, context=None): separator = self.separator.strip() if self.strip and hasattr(self.separator, ) else self.separator value = super().native(value, context) if value is None: return self.cast() if hasattr(value, ): value = value.split(separator) value = self._clean(value) try: return self.cast(value) if self.cast else value except Exception as e: raise Concern("{0} caught, failed to perform array transform: {1}", e.__class__.__name__, str(e))
Convert the given string into a list of substrings.
371,724
# Translate an AssessmentTakenQuery into a MongoDB-style filter (combining
# query terms with $and/$or plus the current view filter) and return the
# matching AssessmentTakenList sorted descending.
# NOTE(review): many string literals (the $and/$or/range operator keys, the
# database and collection names, and the sort field) were stripped by the
# dataset export, so this line is not valid Python as-is; restore them from
# the original source.
def get_assessments_taken_by_query(self, assessment_taken_query): and_list = list() or_list = list() for term in assessment_taken_query._query_terms: if in assessment_taken_query._query_terms[term] and in assessment_taken_query._query_terms[term]: and_list.append( {: [{term: {: assessment_taken_query._query_terms[term][]}}, {term: {: assessment_taken_query._query_terms[term][]}}]}) else: and_list.append({term: assessment_taken_query._query_terms[term]}) for term in assessment_taken_query._keyword_terms: or_list.append({term: assessment_taken_query._keyword_terms[term]}) if or_list: and_list.append({: or_list}) view_filter = self._view_filter() if view_filter: and_list.append(view_filter) if and_list: query_terms = {: and_list} collection = JSONClientValidated(, collection=, runtime=self._runtime) result = collection.find(query_terms).sort(, DESCENDING) else: result = [] return objects.AssessmentTakenList(result, runtime=self._runtime, proxy=self._proxy)
Gets a list of ``AssessmentTaken`` elements matching the given assessment taken query. arg: assessment_taken_query (osid.assessment.AssessmentTakenQuery): the assessment taken query return: (osid.assessment.AssessmentTakenList) - the returned ``AssessmentTakenList`` raise: NullArgument - ``assessment_taken_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_taken_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
371,725
def is_transaction_invalidated(transaction, state_change):
    """True if the `transaction` is made invalid by `state_change`.

    A ``ContractReceiveChannelSettled`` invalidates a pending
    ``ContractSendChannelUpdateTransfer`` for the same token network and
    channel: both start from a valid shared state, but settlement moves
    the channel to a state where the update can no longer be executed.
    """
    # Idiom fix: return the boolean expression directly instead of the
    # original ``if flag: return True / return False`` dance.
    return (
        isinstance(state_change, ContractReceiveChannelSettled)
        and isinstance(transaction, ContractSendChannelUpdateTransfer)
        and state_change.token_network_identifier == transaction.token_network_identifier
        and state_change.channel_identifier == transaction.channel_identifier
    )
True if the `transaction` is made invalid by `state_change`. Some transactions will fail due to race conditions. The races are: - Another transaction which has the same side effect is executed before. - Another transaction which *invalidates* the state of the smart contract required by the local transaction is executed before it. The first case is handled by the predicate `is_transaction_effect_satisfied`, where a transaction from a different source which does the same thing is considered. This predicate handles the second scenario. A transaction can **only** invalidate another iff both share a valid initial state but a different end state. Valid example: A close can invalidate a deposit, because both a close and a deposit can be executed from an opened state (same initial state), but a close transaction will transition the channel to a closed state which doesn't allow for deposits (different end state). Invalid example: A settle transaction cannot invalidate a deposit because a settle is only allowed for the closed state and deposits are only allowed for the open state. In such a case a deposit should never have been sent. The deposit transaction for an invalid state is a bug and not a transaction which was invalidated.
371,726
def _register_namespace_and_command(self, namespace):
    """Add *namespace* plus the matching ``$cmd`` command namespace."""
    self._add_namespace(namespace)

    def command_ns_name(full_name):
        # Database portion of the name, with the command-collection suffix.
        return full_name.split(".", 1)[0] + ".$cmd"

    self._add_namespace(
        Namespace(
            dest_name=command_ns_name(namespace.dest_name),
            source_name=command_ns_name(namespace.source_name),
        )
    )
Add a Namespace and the corresponding command namespace.
371,727
# Register the init command's CLI arguments on ``parser``.
# NOTE(review): every flag name, help text, nargs and action literal was
# stripped by the dataset export, so this line is not valid Python as-is;
# restore them from the original source.
def add_arguments(parser): parser.add_argument(, , help=, required=False, nargs=) parser.add_argument(, , help=, action=)
Args for the init command
371,728
# Read example weather data from CSV into a DataFrame with a
# (variable name, height) MultiIndex on the columns; see the paired
# docstring for details.
# NOTE(review): string literals were stripped by the dataset export (the
# default filename, the kwargs key for the data path, the path fragment,
# and the timezone passed to tz_convert), and the stray leading ``r`` is
# the remnant of the raw docstring marker; not valid Python as-is.
def get_weather_data(filename=, **kwargs): r if not in kwargs: kwargs[] = os.path.join(os.path.split( os.path.dirname(__file__))[0], ) file = os.path.join(kwargs[], filename) weather_df = pd.read_csv( file, index_col=0, header=[0, 1], date_parser=lambda idx: pd.to_datetime(idx, utc=True)) weather_df.index = pd.to_datetime(weather_df.index).tz_convert( ) weather_df.columns = [weather_df.axes[1].levels[0][ weather_df.axes[1].codes[0]], weather_df.axes[1].levels[1][ weather_df.axes[1].codes[1]].astype(int)] return weather_df
r""" Imports weather data from a file. The data include wind speed at two different heights in m/s, air temperature in two different heights in K, surface roughness length in m and air pressure in Pa. The file is located in the example folder of the windpowerlib. The height in m for which the data applies is specified in the second row. Parameters ---------- filename : string Filename of the weather data file. Default: 'weather.csv'. Other Parameters ---------------- datapath : string, optional Path where the weather data file is stored. Default: 'windpowerlib/example'. Returns ------- weather_df : pandas.DataFrame DataFrame with time series for wind speed `wind_speed` in m/s, temperature `temperature` in K, roughness length `roughness_length` in m, and pressure `pressure` in Pa. The columns of the DataFrame are a MultiIndex where the first level contains the variable name as string (e.g. 'wind_speed') and the second level contains the height as integer at which it applies (e.g. 10, if it was measured at a height of 10 m).
371,729
def match(self, data, threshold=0.5, generator=False):
    """Cluster records that refer to the same entity.

    Blocks ``data`` into candidate pairs, then clusters them; returns
    tuples of (set of record ids, confidence score in [0, 1]).  Intended
    for small-to-moderate datasets — use ``matchBlocks`` for larger data.

    Arguments:
    data -- dict mapping record_ids to field-name/value dicts.
    threshold -- predicted-duplicate probability cutoff (default .5);
        lower raises recall, higher raises precision.
    generator -- when True, return the lazy cluster generator instead of
        a materialized list.
    """
    candidate_pairs = self._blockData(data)
    clusters = self.matchBlocks(candidate_pairs, threshold)
    if generator:
        return clusters
    return list(clusters)
Identifies records that all refer to the same entity, returns tuples containing a set of record ids and a confidence score as a float between 0 and 1. The record_ids within each set should refer to the same entity and the confidence score is a measure of our confidence that all the records in a cluster refer to the same entity. This method should only used for small to moderately sized datasets for larger data, use matchBlocks Arguments: data -- Dictionary of records, where the keys are record_ids and the values are dictionaries with the keys being field names threshold -- Number between 0 and 1 (default is .5). We will consider records as potential duplicates if the predicted probability of being a duplicate is above the threshold. Lowering the number will increase recall, raising it will increase precision
371,730
# CLI entry point: parses command-line options and provisions a server plus
# volume composite via the ProfitBricks API, then reports the new server's
# first NIC. Returns 0 on success/interrupt, 2 on error.
# NOTE(review): flattened by the dataset export with nearly all argparse
# flag strings, help texts and dict keys stripped; not valid Python as-is.
# The text beginning ``NIC(name=...`` on the following line is the
# continuation of this same function.
def main(argv=None): if argv is None: argv = sys.argv else: sys.argv.extend(argv) program_name = os.path.basename(sys.argv[0]) program_version = "v%s" % __version__ program_build_date = str(__updated__) program_version_message = % (program_version, program_build_date) program_shortdesc = __import__().__doc__.split("\n")[1] program_license = % (program_shortdesc, str(__date__)) try: parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter) parser.add_argument(, , dest=, help=) parser.add_argument(, , dest=, help=) parser.add_argument(, , dest=, default=None, help=) parser.add_argument(, , dest=, required=True, default=None, help=) parser.add_argument(, , dest=, required=True, default=None, help=) parser.add_argument(, , dest=, default="SRV_"+datetime.now().isoformat(), help=) parser.add_argument(, , dest=, type=int, default=2, help=) parser.add_argument(, , dest=, type=int, default=4, help=) parser.add_argument(, , dest=, type=int, default=4, help=) parser.add_argument(, , dest=, default="HDD", help=) parser.add_argument(, , dest=, default=None, help=) parser.add_argument(, , dest=, default=None, help=) parser.add_argument(, , dest="verbose", action="count", help="set verbosity level [default: %(default)s]") parser.add_argument(, , action=, version=program_version_message) args = parser.parse_args() global verbose verbose = args.verbose dc_id = args.datacenterid lan_id = args.lanid servername = args.servername if verbose > 0: print("Verbose mode on") print("start {} with args {}".format(program_name, str(args))) hdimage = args.imageid cdimage = None if args.bootdevice == "CDROM": hdimage = None cdimage = args.imageid print("using boot device {} with image {}" .format(args.bootdevice, args.imageid)) (user, password) = getLogin(args.loginfile, args.user, args.password) if user is None or password is None: raise ValueError("user or password resolved to None") pbclient = ProfitBricksService(user, password) first_nic = 
NIC(name="local", ips=[], dhcp=True, lan=lan_id) volume = Volume(name=servername+"-Disk", size=args.storage, image=hdimage, image_password=args.imgpassword) server = Server(name=servername, cores=args.cores, ram=args.ram*1024, create_volumes=[volume], nics=[first_nic], boot_cdrom=cdimage) print("creating server..") if verbose > 0: print("SERVER: {}".format(str(server))) response = pbclient.create_server(dc_id, server) print("wait for provisioning..") wait_for_request(pbclient, response["requestId"]) server_id = response[] print("Server provisioned with ID {}".format(server_id)) nics = pbclient.list_nics(dc_id, server_id, 1) if not nics[]: raise CLIError("No NICs found for newly created server {}" .format(server_id)) nic0 = nics[][0] if verbose > 0: print("NIC0: {}".format(str(nic0))) (nic_id, nic_mac) = (nic0[], nic0[][]) print("NIC of new Server has ID {} and MAC {}".format(nic_id, nic_mac)) print("{} finished w/o errors".format(program_name)) return 0 except KeyboardInterrupt: return 0 except Exception: traceback.print_exc() sys.stderr.write("\n" + program_name + ": for help use --help\n") return 2
Parse command line options and create a server/volume composite.
371,731
# Render a 2x3 overview figure: the three anisotropic magnitudes (top row)
# and their pairwise ratios x/y, y/z, z/x (bottom row), then save it.
# NOTE(review): string literals (the loglin mode values, the per-subplot
# title arguments, and the output filename passed to f.savefig) were
# stripped by the dataset export; not valid Python as-is.
def create_anisomagplot(plotman, x, y, z, alpha, options): sizex, sizez = getfigsize(plotman) f, ax = plt.subplots(2, 3, figsize=(3 * sizex, 2 * sizez)) if options.title is not None: plt.suptitle(options.title, fontsize=18) plt.subplots_adjust(wspace=1.5, top=2) if options.cmaglin: cidx = plotman.parman.add_data(np.power(10, x)) cidy = plotman.parman.add_data(np.power(10, y)) cidz = plotman.parman.add_data(np.power(10, z)) loglin = else: cidx = plotman.parman.add_data(x) cidy = plotman.parman.add_data(y) cidz = plotman.parman.add_data(z) loglin = cidxy = plotman.parman.add_data(np.divide(x, y)) cidyz = plotman.parman.add_data(np.divide(y, z)) cidzx = plotman.parman.add_data(np.divide(z, x)) plot_mag(cidx, ax[0, 0], plotman, , loglin, alpha, options.mag_vmin, options.mag_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs, ) plot_mag(cidy, ax[0, 1], plotman, , loglin, alpha, options.mag_vmin, options.mag_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs, ) plot_mag(cidz, ax[0, 2], plotman, , loglin, alpha, options.mag_vmin, options.mag_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs, ) plot_ratio(cidxy, ax[1, 0], plotman, , alpha, options.rat_vmin, options.rat_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs, ) plot_ratio(cidyz, ax[1, 1], plotman, , alpha, options.rat_vmin, options.rat_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs, ) plot_ratio(cidzx, ax[1, 2], plotman, , alpha, options.rat_vmin, options.rat_vmax, options.xmin, options.xmax, options.zmin, options.zmax, options.unit, options.mag_cbtiks, options.no_elecs, ) f.tight_layout() f.savefig(, dpi=300) return f, ax
Plot the data of the tomodir in one overview plot.
371,732
# Collect copies of the histogram arrays matching the configured mode/area
# (pattern [type1_hel+, type2_hel+, type1_hel-, type2_hel-] per the paired
# docstring), extending with extra series in one mode.
# NOTE(review): every ``self.hist[...]`` key literal and the compared mode/
# area values were stripped by the dataset export; not valid Python as-is.
def _get_area_data(self): if self.mode == : data = [self.hist[].data,\ self.hist[].data,\ self.hist[].data,\ self.hist[].data] elif self.area == : data = [self.hist[].data,\ self.hist[].data,\ self.hist[].data,\ self.hist[].data] elif self.area == : data = [self.hist[].data,\ self.hist[].data,\ self.hist[].data,\ self.hist[].data] else: data = [] if self.mode == : data.extend([self.hist[].data,self.hist[].data, self.hist[].data,self.hist[].data, self.hist[].data,self.hist[].data, self.hist[].data,self.hist[].data]) return [np.copy(d) for d in data]
Get histogram list based on area type. List pattern: [type1_hel+,type2_hel+,type1_hel-,type2_hel-] where type1/2 = F/B or R/L in that order.
371,733
def statistic_recommend(classes, P):
    """Recommend parameters suited to the dataset's characteristics.

    :param classes: all class names
    :type classes: list
    :param P: condition positive
    :type P: dict
    :return: recommendation list
    """
    if imbalance_check(P):
        recommendation = IMBALANCED_RECOMMEND
    elif binary_check(classes):
        recommendation = BINARY_RECOMMEND
    else:
        recommendation = MULTICLASS_RECOMMEND
    return recommendation
Return recommend parameters which are more suitable due to the input dataset characteristics. :param classes: all classes name :type classes : list :param P: condition positive :type P : dict :return: recommendation_list as list
371,734
# pdb-style ``alias`` command: with no args list all aliases, with one arg
# show that alias, otherwise (re)define the alias from the remaining words.
# NOTE(review): the separator literal before ``.join(args[1:])`` was
# stripped by the dataset export (originally a single-space string), so
# this line is not valid Python as-is; restore it from the original source.
def do_alias(self, arg): args = arg.split() if len(args) == 0: keys = sorted(self.aliases.keys()) for alias in keys: self.message("%s = %s" % (alias, self.aliases[alias])) return if args[0] in self.aliases and len(args) == 1: self.message("%s = %s" % (args[0], self.aliases[args[0]])) else: self.aliases[args[0]] = .join(args[1:])
alias [name [command [parameter parameter ...] ]] Create an alias called 'name' that executes 'command'. The command must *not* be enclosed in quotes. Replaceable parameters can be indicated by %1, %2, and so on, while %* is replaced by all the parameters. If no command is given, the current alias for name is shown. If no name is given, all aliases are listed. Aliases may be nested and can contain anything that can be legally typed at the pdb prompt. Note! You *can* override internal pdb commands with aliases! Those internal commands are then hidden until the alias is removed. Aliasing is recursively applied to the first word of the command line; all other words in the line are left alone. As an example, here are two useful aliases (especially when placed in the .pdbrc file): # Print instance variables (usage "pi classInst") alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) # Print instance variables in self alias ps pi self
371,735
def novatel_diag_send(self, timeStatus, receiverStatus, solStatus, posType, velType, posSolAge, csFails, force_mavlink1=False):
    """Transmit the diagnostics data from the Novatel OEMStar GPS.

    timeStatus     : Time Status (uint8_t)
    receiverStatus : status bitfield (uint32_t)
    solStatus      : solution status (uint8_t)
    posType        : position type (uint8_t)
    velType        : velocity type (uint8_t)
    posSolAge      : age of the position solution in seconds (float)
    csFails        : CRC failure count since boot (uint16_t)
    """
    encoded = self.novatel_diag_encode(
        timeStatus,
        receiverStatus,
        solStatus,
        posType,
        velType,
        posSolAge,
        csFails,
    )
    return self.send(encoded, force_mavlink1=force_mavlink1)
Transmits the diagnostics data from the Novatel OEMStar GPS timeStatus : The Time Status. See Table 8 page 27 Novatel OEMStar Manual (uint8_t) receiverStatus : Status Bitfield. See table 69 page 350 Novatel OEMstar Manual (uint32_t) solStatus : solution Status. See table 44 page 197 (uint8_t) posType : position type. See table 43 page 196 (uint8_t) velType : velocity type. See table 43 page 196 (uint8_t) posSolAge : Age of the position solution in seconds (float) csFails : Times the CRC has failed since boot (uint16_t)
371,736
def serialize(self, subject, *objects_or_combinators): ec_s = rdflib.BNode() if self.operator is not None: if subject is not None: yield subject, self.predicate, ec_s yield from oc(ec_s) yield from self._list.serialize(ec_s, self.operator, *objects_or_combinators) else: for thing in objects_or_combinators: if isinstance(thing, Combinator): object = rdflib.BNode() hasType = False for t in thing(object): if t[1] == rdf.type: hasType = True yield t if not hasType: yield object, rdf.type, owl.Class else: object = thing yield subject, self.predicate, object
object_combinators may also be URIRefs or Literals
371,737
def in_telephones(objet, pattern): objet = objet or [] if pattern == or not objet: return False return max(bool(re.search(pattern, t)) for t in objet)
abstractSearch dans une liste de téléphones.
371,738
def replace_greek_tex(self, name): name = name.replace(, ) name = name.replace(, ) for greek_txt, tex in self.greek2tex.items(): if greek_txt in name: name = name.replace(greek_txt, "{B}".format(B=tex)) return name
Replace text representing greek letters with greek letters.
371,739
def json(self): if self.fresh(): return self.__cached_json self.__last_read_time = time.time() self.__cached_json = self._router.get_instance(org_id=self.organizationId, instance_id=self.instanceId).json() return self.__cached_json
return __cached_json, if accessed withing 300 ms. This allows to optimize calls when many parameters of entity requires withing short time.
371,740
def _initGP(self): if self._inference==: signalPos = sp.where(sp.arange(self.n_randEffs)!=self.noisPos)[0][0] gp = GP2KronSum(Y=self.Y, F=self.sample_designs, A=self.trait_designs, Cg=self.trait_covars[signalPos], Cn=self.trait_covars[self.noisPos], R=self.sample_covars[signalPos]) else: mean = MeanKronSum(self.Y, self.sample_designs, self.trait_designs) Iok = vec(~sp.isnan(mean.Y))[:,0] if Iok.all(): Iok = None covar = SumCov(*[KronCov(self.trait_covars[i], self.sample_covars[i], Iok=Iok) for i in range(self.n_randEffs)]) gp = GP(covar = covar, mean = mean) self.gp = gp
Internal method for initialization of the GP inference objetct
371,741
def is_client_ip_address_whitelisted(request: AxesHttpRequest): if settings.AXES_NEVER_LOCKOUT_WHITELIST and is_ip_address_in_whitelist(request.axes_ip_address): return True if settings.AXES_ONLY_WHITELIST and is_ip_address_in_whitelist(request.axes_ip_address): return True return False
Check if the given request refers to a whitelisted IP.
371,742
def divide(self, layer=WORDS, by=SENTENCES): if not self.is_tagged(layer): self.tag(layer) if not self.is_tagged(by): self.tag(by) return divide(self[layer], self[by])
Divide the Text into pieces by keeping references to original elements, when possible. This is not possible only, if the _element_ is a multispan. Parameters ---------- element: str The element to collect and distribute in resulting bins. by: str Each resulting bin is defined by spans of this element. Returns ------- list of (list of dict)
371,743
def pvfactors_timeseries( solar_azimuth, solar_zenith, surface_azimuth, surface_tilt, timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo, n_pvrows=3, index_observed_pvrow=1, rho_front_pvrow=0.03, rho_back_pvrow=0.05, horizon_band_angle=15., run_parallel_calculations=True, n_workers_for_parallel_calcs=None): if isinstance(solar_azimuth, pd.Series): solar_azimuth = solar_azimuth.values if isinstance(solar_zenith, pd.Series): solar_zenith = solar_zenith.values if isinstance(surface_azimuth, pd.Series): surface_azimuth = surface_azimuth.values if isinstance(surface_tilt, pd.Series): surface_tilt = surface_tilt.values if isinstance(dni, pd.Series): dni = dni.values if isinstance(dhi, pd.Series): dhi = dhi.values from pvfactors.timeseries import (calculate_radiosities_parallel_perez, calculate_radiosities_serially_perez, get_average_pvrow_outputs) idx_slice = pd.IndexSlice pvarray_parameters = { : n_pvrows, : pvrow_height, : pvrow_width, : gcr, : albedo, : rho_front_pvrow, : rho_back_pvrow, : horizon_band_angle } if run_parallel_calculations: df_registries, df_custom_perez = calculate_radiosities_parallel_perez( pvarray_parameters, timestamps, solar_zenith, solar_azimuth, surface_tilt, surface_azimuth, dni, dhi, n_processes=n_workers_for_parallel_calcs) else: inputs = (pvarray_parameters, timestamps, solar_zenith, solar_azimuth, surface_tilt, surface_azimuth, dni, dhi) df_registries, df_custom_perez = calculate_radiosities_serially_perez( inputs) df_outputs = get_average_pvrow_outputs(df_registries, values=[], include_shading=True) ipoa_front = df_outputs.loc[:, idx_slice[index_observed_pvrow, , ]] ipoa_back = df_outputs.loc[:, idx_slice[index_observed_pvrow, , ]] df_registries = df_registries.set_index() return ipoa_front, ipoa_back, df_registries
Calculate front and back surface plane-of-array irradiance on a fixed tilt or single-axis tracker PV array configuration, and using the open-source "pvfactors" package. Please refer to pvfactors online documentation for more details: https://sunpower.github.io/pvfactors/ Parameters ---------- solar_azimuth: numeric Sun's azimuth angles using pvlib's azimuth convention (deg) solar_zenith: numeric Sun's zenith angles (deg) surface_azimuth: numeric Azimuth angle of the front surface of the PV modules, using pvlib's convention (deg) surface_tilt: numeric Tilt angle of the PV modules, going from 0 to 180 (deg) timestamps: datetime or DatetimeIndex List of simulation timestamps dni: numeric Direct normal irradiance (W/m2) dhi: numeric Diffuse horizontal irradiance (W/m2) gcr: float Ground coverage ratio of the pv array pvrow_height: float Height of the pv rows, measured at their center (m) pvrow_width: float Width of the pv rows in the considered 2D plane (m) albedo: float Ground albedo n_pvrows: int, default 3 Number of PV rows to consider in the PV array index_observed_pvrow: int, default 1 Index of the PV row whose incident irradiance will be returned. Indices of PV rows go from 0 to n_pvrows-1. rho_front_pvrow: float, default 0.03 Front surface reflectivity of PV rows rho_back_pvrow: float, default 0.05 Back surface reflectivity of PV rows horizon_band_angle: float, default 15 Elevation angle of the sky dome's diffuse horizon band (deg) run_parallel_calculations: bool, default True pvfactors is capable of using multiprocessing. Use this flag to decide to run calculations in parallel (recommended) or not. n_workers_for_parallel_calcs: int, default None Number of workers to use in the case of parallel calculations. The default value of 'None' will lead to using a value equal to the number of CPU's on the machine running the model. 
Returns ------- front_poa_irradiance: numeric Calculated incident irradiance on the front surface of the PV modules (W/m2) back_poa_irradiance: numeric Calculated incident irradiance on the back surface of the PV modules (W/m2) df_registries: pandas DataFrame DataFrame containing detailed outputs of the simulation; for instance the shapely geometries, the irradiance components incident on all surfaces of the PV array (for all timestamps), etc. In the pvfactors documentation, this is refered to as the "surface registry". References ---------- .. [1] Anoma, Marc Abou, et al. "View Factor Model and Validation for Bifacial PV and Diffuse Shade on Single-Axis Trackers." 44th IEEE Photovoltaic Specialist Conference. 2017.
371,744
def _update_state(self, change): self._block_updates = True try: self.position = self.axis.Location() self.direction = self.axis.Direction() finally: self._block_updates = False
Keep position and direction in sync with axis
371,745
def load(self, wishlist, calibration=None, resolution=None, polarization=None, level=None, generate=True, unload=True, **kwargs): dataset_keys = set(wishlist) needed_datasets = (self.wishlist | dataset_keys) - \ set(self.datasets.keys()) unknown = self.dep_tree.find_dependencies(needed_datasets, calibration=calibration, polarization=polarization, resolution=resolution, level=level) self.wishlist |= needed_datasets if unknown: unknown_str = ", ".join(map(str, unknown)) raise KeyError("Unknown datasets: {}".format(unknown_str)) self.read(**kwargs) if generate: keepables = self.generate_composites() else: missing = self.missing_datasets.copy() self._remove_failed_datasets(keepables) missing_str = ", ".join(str(x) for x in missing) LOG.warning("The following datasets were not created and may require " "resampling to be generated: {}".format(missing_str)) if unload: self.unload(keepables=keepables)
Read and generate requested datasets. When the `wishlist` contains `DatasetID` objects they can either be fully-specified `DatasetID` objects with every parameter specified or they can not provide certain parameters and the "best" parameter will be chosen. For example, if a dataset is available in multiple resolutions and no resolution is specified in the wishlist's DatasetID then the highest (smallest number) resolution will be chosen. Loaded `DataArray` objects are created and stored in the Scene object. Args: wishlist (iterable): Names (str), wavelengths (float), or DatasetID objects of the requested datasets to load. See `available_dataset_ids()` for what datasets are available. calibration (list, str): Calibration levels to limit available datasets. This is a shortcut to having to list each DatasetID in `wishlist`. resolution (list | float): Resolution to limit available datasets. This is a shortcut similar to calibration. polarization (list | str): Polarization ('V', 'H') to limit available datasets. This is a shortcut similar to calibration. level (list | str): Pressure level to limit available datasets. Pressure should be in hPa or mb. If an altitude is used it should be specified in inverse meters (1/m). The units of this parameter ultimately depend on the reader. generate (bool): Generate composites from the loaded datasets (default: True) unload (bool): Unload datasets that were required to generate the requested datasets (composite dependencies) but are no longer needed.
371,746
def _to_topology(self, atom_list, chains=None, residues=None): from mdtraj.core.topology import Topology if isinstance(chains, string_types): chains = [chains] if isinstance(chains, (list, set)): chains = tuple(chains) if isinstance(residues, string_types): residues = [residues] if isinstance(residues, (list, set)): residues = tuple(residues) top = Topology() atom_mapping = {} default_chain = top.add_chain() default_residue = top.add_residue(, default_chain) compound_residue_map = dict() atom_residue_map = dict() compound_chain_map = dict() atom_chain_map = dict() for atom in atom_list: if chains: if atom.name in chains: current_chain = top.add_chain() compound_chain_map[atom] = current_chain else: for parent in atom.ancestors(): if chains and parent.name in chains: if parent not in compound_chain_map: current_chain = top.add_chain() compound_chain_map[parent] = current_chain current_residue = top.add_residue( , current_chain) break else: current_chain = default_chain else: current_chain = default_chain atom_chain_map[atom] = current_chain if residues: if atom.name in residues: current_residue = top.add_residue(atom.name, current_chain) compound_residue_map[atom] = current_residue else: for parent in atom.ancestors(): if residues and parent.name in residues: if parent not in compound_residue_map: current_residue = top.add_residue( parent.name, current_chain) compound_residue_map[parent] = current_residue break else: current_residue = default_residue else: if chains: try: current_residue = next(current_chain.residues) except StopIteration: current_residue = top.add_residue(, current_chain) else: current_residue = default_residue atom_residue_map[atom] = current_residue try: elem = get_by_symbol(atom.name) except KeyError: elem = get_by_symbol("VS") at = top.add_atom(atom.name, elem, atom_residue_map[atom]) at.charge = atom.charge atom_mapping[atom] = at chains_to_remove = [ chain for chain in top.chains if chain.n_atoms == 0] residues_to_remove = [res for res in 
top.residues if res.n_atoms == 0] for chain in chains_to_remove: top._chains.remove(chain) for res in residues_to_remove: for chain in top.chains: try: chain._residues.remove(res) except ValueError: pass for atom1, atom2 in self.bonds(): if all(a in atom_mapping.keys() for a in [atom1, atom2]): top.add_bond(atom_mapping[atom1], atom_mapping[atom2]) return top
Create a mdtraj.Topology from a Compound. Parameters ---------- atom_list : list of mb.Compound Atoms to include in the topology chains : mb.Compound or list of mb.Compound Chain types to add to the topology residues : str of list of str Labels of residues in the Compound. Residues are assigned by checking against Compound.name. Returns ------- top : mdtraj.Topology See Also -------- mdtraj.Topology : Details on the mdtraj Topology object
371,747
async def _create_transaction(self, msg, *args, **kwargs): recv_msgs, get_key, _1, _2, _3 = self._msgs_registered[msg.__msgtype__] key = get_key(msg) if key in self._transactions[recv_msgs[0]]: for recv_msg in recv_msgs: self._transactions[recv_msg][key].append((args, kwargs)) else: for recv_msg in recv_msgs: self._transactions[recv_msg][key] = [(args, kwargs)] await ZMQUtils.send(self._socket, msg)
Create a transaction with the distant server :param msg: message to be sent :param args: args to be sent to the coroutines given to `register_transaction` :param kwargs: kwargs to be sent to the coroutines given to `register_transaction`
371,748
def to_dict(self): return { "timestamp": int(self.timestamp), "timezone": self.timezone, "time_of_day": self.time_of_day, "day_of_week": self.day_of_week, "day_of_month": self.day_of_month, "month_of_year": self.month_of_year, "utc_iso": self.utc_iso }
Returns the Time instance as a usable dictionary for craftai
371,749
def _req_rep_retry(self, request): retries_left = self.RETRIES while retries_left: self._logger.log(1, , request) self._send_request(request) socks = dict(self._poll.poll(self.TIMEOUT)) if socks.get(self._socket) == zmq.POLLIN: response = self._receive_response() self._logger.log(1, , response) return response, self.RETRIES - retries_left else: self._logger.debug( % retries_left) self._close_socket(confused=True) retries_left -= 1 if retries_left == 0: raise RuntimeError() time.sleep(self.SLEEP) self._start_socket()
Returns response and number of retries
371,750
def get_value(cls, bucket, key): obj = cls.get(bucket, key) return obj.value if obj else None
Get tag value.
371,751
def _list_machines(self): try: req = self.request(self.mist_client.uri++self.id+) machines = req.get().json() except: machines = {} if machines: for machine in machines: self._machines[machine[]] = Machine(machine, self) else: self._machines = {}
Request a list of all added machines. Populates self._machines dict with mist.client.model.Machine instances
371,752
def to_timepoints(self, unit=, offset=None): unit = Period.from_cfunits(unit) if offset is None: offset = 0. else: try: offset = Period(offset)/unit except TypeError: offset = offset step = self.stepsize/unit nmb = len(self) variable = numpy.linspace(offset, offset+step*(nmb-1), nmb) return variable
Return an |numpy.ndarray| representing the starting time points of the |Timegrid| object. The following examples identical with the ones of |Timegrid.from_timepoints| but reversed. By default, the time points are given in hours: >>> from hydpy import Timegrid >>> timegrid = Timegrid('2000-01-01', '2000-01-02', '6h') >>> timegrid.to_timepoints() array([ 0., 6., 12., 18.]) Other time units (`days` or `min`) can be defined (only the first character counts): >>> timegrid.to_timepoints(unit='d') array([ 0. , 0.25, 0.5 , 0.75]) Additionally, one can pass an `offset` that must be of type |int| or an valid |Period| initialization argument: >>> timegrid.to_timepoints(offset=24) array([ 24., 30., 36., 42.]) >>> timegrid.to_timepoints(offset='1d') array([ 24., 30., 36., 42.]) >>> timegrid.to_timepoints(unit='day', offset='1d') array([ 1. , 1.25, 1.5 , 1.75])
371,753
def repartition(self, num_partitions, repartition_function=None): from heronpy.streamlet.impl.repartitionbolt import RepartitionStreamlet if repartition_function is None: repartition_function = lambda x: x repartition_streamlet = RepartitionStreamlet(num_partitions, repartition_function, self) self._add_child(repartition_streamlet) return repartition_streamlet
Return a new Streamlet containing all elements of the this streamlet but having num_partitions partitions. Note that this is different from num_partitions(n) in that new streamlet will be created by the repartition call. If repartiton_function is not None, it is used to decide which parititons (from 0 to num_partitions -1), it should route each element to. It could also return a list of partitions if it wants to send it to multiple partitions.
371,754
def map_to_subset(self, file, outfile=None, ontology=None, subset=None, class_map=None, relations=None): if subset is not None: logging.info("Creating mapping for subset: {}".format(subset)) class_map = ontology.create_slim_mapping(subset=subset, relations=relations) if class_map is None: raise ValueError("Neither class_map not subset is set") col = self.ANNOTATION_CLASS_COLUMN file = self._ensure_file(file) tuples = [] for line in file: if line.startswith("!"): continue vals = line.split("\t") logging.info("LINE: {} VALS: {}".format(line, vals)) if len(vals) < col: raise ValueError("Line: {} has too few cols, expect class id in col {}".format(line, col)) cid = vals[col] if cid not in class_map or len(class_map[cid]) == 0: self.report.error(line, Report.UNMAPPED_ID, cid) continue else: for mcid in class_map[cid]: vals[col] = mcid line = "\t".join(vals) if outfile is not None: outfile.write(line) else: print(line)
Map a file to a subset, writing out results You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings Arguments --------- file: file Name or file object for input assoc file outfile: file Name or file object for output (mapped) assoc file; writes to stdout if not set subset: str Optional name of subset to map to, e.g. goslim_generic class_map: dict Mapping between asserted class ids and ids to map to. Many to many ontology: `Ontology` Ontology to extract subset from
371,755
def merge(self, other): for attr in self.attrs: if not getattr(other, attr, None) is None: setattr(self, attr, getattr(other, attr)) if other.raw: if not self.raw: self.raw = {} self.raw.update(other.raw)
Copy properties from other into self, skipping ``None`` values. Also merges the raw data. Args: other (SkypeObj): second object to copy fields from
371,756
def get_series_episodes(self, id, page=1): params = {: page} r = self.session.get(self.base_url + .format(id), params=params) if r.status_code == 404: return None r.raise_for_status() return r.json()
Get series episodes
371,757
def _simplify(elements): simplified = [] previous = None for element in elements: if element == "..": raise FormicError("Invalid glob:" " Cannot have in a glob: {0}".format( "/".join(elements))) elif element == ".": pass elif element == "**" and previous == "**": pass else: simplified.append(os.path.normcase(element)) previous = element if simplified[-1] == "": simplified[-1] = "**" if simplified[0] == "": del simplified[0] else: if simplified[0] != "**": simplified.insert(0, "**") return simplified
Simplifies and normalizes the list of elements removing redundant/repeated elements and normalising upper/lower case so case sensitivity is resolved here.
371,758
def to_kwargs(triangles): triangles = np.asanyarray(triangles, dtype=np.float64) if not util.is_shape(triangles, (-1, 3, 3)): raise ValueError() vertices = triangles.reshape((-1, 3)) faces = np.arange(len(vertices)).reshape((-1, 3)) kwargs = {: vertices, : faces} return kwargs
Convert a list of triangles to the kwargs for the Trimesh constructor. Parameters --------- triangles : (n, 3, 3) float Triangles in space Returns --------- kwargs : dict Keyword arguments for the trimesh.Trimesh constructor Includes keys 'vertices' and 'faces' Examples --------- >>> mesh = trimesh.Trimesh(**trimesh.triangles.to_kwargs(triangles))
371,759
def infer(examples, alt_rules=None): date_classes = _tag_most_likely(examples) if alt_rules: date_classes = _apply_rewrites(date_classes, alt_rules) else: date_classes = _apply_rewrites(date_classes, RULES) date_string = for date_class in date_classes: date_string += date_class.directive return date_string
Returns a datetime.strptime-compliant format string for parsing the *most likely* date format used in examples. examples is a list containing example date strings.
371,760
def add_column(self, tablename: str, fieldspec: FIELDSPEC_TYPE) -> int: sql = "ALTER TABLE {} ADD COLUMN {}".format( tablename, self.fielddefsql_from_fieldspec(fieldspec)) log.info(sql) return self.db_exec_literal(sql)
Adds a column to an existing table.
371,761
def unicode_dict(_dict): r = {} for k, v in iteritems(_dict): r[unicode_obj(k)] = unicode_obj(v) return r
Make sure keys and values of dict is unicode.
371,762
def take(self, axis, n): if not axis: partitions = self.partitions bin_lengths = self.block_lengths else: partitions = self.partitions.T bin_lengths = self.block_widths if n < 0: length_bins = np.cumsum(bin_lengths[::-1]) n *= -1 idx = int(np.digitize(n, length_bins)) if idx > 0: remaining = int(n - length_bins[idx - 1]) else: remaining = n if remaining == 0: result = partitions[-idx:] else: partitions = partitions[::-1] slice_obj = ( slice(-remaining, None) if axis == 0 else (slice(None), slice(-remaining, None)) ) func = self.preprocess_func(lambda df: df.iloc[slice_obj]) result = np.array( [ partitions[i] if i != idx else [obj.apply(func) for obj in partitions[i]] for i in range(idx + 1) ] )[::-1] else: length_bins = np.cumsum(bin_lengths) idx = int(np.digitize(n, length_bins)) if idx > 0: remaining = int(n - length_bins[idx - 1]) else: remaining = n if remaining == 0: result = partitions[:idx] else: slice_obj = ( slice(remaining) if axis == 0 else (slice(None), slice(remaining)) ) func = self.preprocess_func(lambda df: df.iloc[slice_obj]) result = np.array( [ partitions[i] if i != idx else [obj.apply(func) for obj in partitions[i]] for i in range(idx + 1) ] ) return self.__constructor__(result.T) if axis else self.__constructor__(result)
Take the first (or last) n rows or columns from the blocks Note: Axis = 0 will be equivalent to `head` or `tail` Axis = 1 will be equivalent to `front` or `back` Args: axis: The axis to extract (0 for extracting rows, 1 for extracting columns) n: The number of rows or columns to extract, negative denotes to extract from the bottom of the object Returns: A new BaseFrameManager object, the type of object that called this.
371,763
def getAdjEdges(self, networkId, nodeId, verbose=None): response=api(url=self.___url++str(networkId)++str(nodeId)+, method="GET", verbose=verbose, parse_params=False) return response
Returns a list of connected edges as SUIDs for the node specified by the `nodeId` and `networkId` parameters. :param networkId: SUID of the network containing the node :param nodeId: SUID of the node :param verbose: print more :returns: 200: successful operation
371,764
def verify_hash_type(self): if self.config[].lower() in [, ]: log.warning( , self.config[], self.__class__.__name__ )
Verify and display a nag-messsage to the log if vulnerable hash-type is used. :return:
371,765
def add_to_products(self, products=None, all_products=False): if all_products: if products: raise ArgumentError("When adding to all products, do not specify specific products") plist = "all" else: if not products: raise ArgumentError("You must specify products to which to add the user group") plist = {GroupTypes.productConfiguration.name: [product for product in products]} return self.append(add=plist)
Add user group to some product license configuration groups (PLCs), or all of them. :param products: list of product names the user should be added to :param all_products: a boolean meaning add to all (don't specify products in this case) :return: the Group, so you can do Group(...).add_to_products(...).add_users(...)
371,766
def _todict(cls): return dict((getattr(cls, attr), attr) for attr in dir(cls) if not attr.startswith())
generate a dict keyed by value
371,767
def model_config_from_estimator(instance_type, estimator, task_id, task_type, role=None, image=None, name=None, model_server_workers=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT): update_estimator_from_task(estimator, task_id, task_type) if isinstance(estimator, sagemaker.estimator.Estimator): model = estimator.create_model(role=role, image=image, vpc_config_override=vpc_config_override) elif isinstance(estimator, sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase): model = estimator.create_model(vpc_config_override=vpc_config_override) elif isinstance(estimator, sagemaker.estimator.Framework): model = estimator.create_model(model_server_workers=model_server_workers, role=role, vpc_config_override=vpc_config_override) else: raise TypeError( ) model.name = name return model_config(instance_type, model, role, image)
Export Airflow model config from a SageMaker estimator Args: instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge' estimator (sagemaker.model.EstimatorBase): The SageMaker estimator to export Airflow config from. It has to be an estimator associated with a training job. task_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or airflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG. The model config is built based on the training job generated in this operator. task_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator. Values can be 'training', 'tuning' or None (which means training job is not from any task). role (str): The ``ExecutionRoleArn`` IAM Role ARN for the model image (str): An container image to use for deploying the model name (str): Name of the model model_server_workers (int): The number of worker processes used by the inference server. If None, server will use one worker per vCPU. Only effective when estimator is a SageMaker framework. vpc_config_override (dict[str, list[str]]): Override for VpcConfig set on the model. Default: use subnets and security groups from this Estimator. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. Returns: dict: Model config that can be directly used by SageMakerModelOperator in Airflow. It can also be part of the config used by SageMakerEndpointOperator in Airflow.
371,768
def ChunkedTransformerLM(vocab_size, feature_depth=512, feedforward_depth=2048, num_layers=6, num_heads=8, dropout=0.1, chunk_selector=None, max_len=2048, mode=): stack = [ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, chunk_selector, mode) for _ in range(num_layers)] return layers.Serial( layers.ShiftRight(), layers.Map(layers.Embedding(feature_depth, vocab_size)), layers.Map(layers.Dropout(rate=dropout, mode=mode)), layers.PositionalEncoding(max_len=max_len), layers.Serial(*stack), layers.Map(layers.LayerNorm()), layers.Map(layers.Dense(vocab_size)), layers.Map(layers.LogSoftmax()), )
Transformer language model operating on chunks. The input to this model is a sequence presented as a list or tuple of chunks: (chunk1, chunk2, chunks3, ..., chunkN). Each chunk should have the same shape (batch, chunk-length) and together they represent a long sequence that's a concatenation chunk1,chunk2,...,chunkN. Chunked Transformer emulates the operation of a Transformer on this long sequence except for the chunked attention layer, which may attend to only a subset of the chunks to reduce memory use. Args: vocab_size: int: vocab size feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_layers: int: number of encoder/decoder layers num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) chunk_selector: a function from chunk number to list of chunks to attend (if None, attends to the previous chunks which is equivalent to setting chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL); we attend to the current chunk with a causal mask too, selected chunks unmasked). max_len: int: maximum symbol length for positional encoding mode: str: 'train' or 'eval' Returns: the layer.
371,769
def _parse_commit(self, ref): lineno = self.lineno mark = self._get_mark_if_any() author = self._get_user_info(b, b, False) more_authors = [] while True: another_author = self._get_user_info(b, b, False) if another_author is not None: more_authors.append(another_author) else: break committer = self._get_user_info(b, b) message = self._get_data(b, b) from_ = self._get_from() merges = [] while True: merge = self._get_merge() if merge is not None: merges.extend(these_merges) else: break properties = {} while True: name_value = self._get_property() if name_value is not None: name, value = name_value properties[name] = value else: break return commands.CommitCommand(ref, mark, author, committer, message, from_, merges, list(self.iter_file_commands()), lineno=lineno, more_authors=more_authors, properties=properties)
Parse a commit command.
371,770
def from_symmop(cls, symmop, time_reversal): magsymmop = cls(symmop.affine_matrix, time_reversal, symmop.tol) return magsymmop
Initialize a MagSymmOp from a SymmOp and time reversal operator. Args: symmop (SymmOp): SymmOp time_reversal (int): Time reversal operator, +1 or -1. Returns: MagSymmOp object
371,771
def _cmd_up(self): revision = self._get_revision() if not self._rev: self._log(0, "upgrading current revision") else: self._log(0, "upgrading from revision %s" % revision) for rev in self._revisions[int(revision) - 1:]: sql_files = glob.glob(os.path.join(self._migration_path, rev, "*.up.sql")) sql_files.sort() self._exec(sql_files, rev) self._log(0, "done: upgraded revision to %s\n" % rev)
Upgrade to a revision
371,772
def extra_space_exists(str1: str, str2: str) -> bool: ls1, ls2 = len(str1), len(str2) if str1.isdigit(): if str2 in [, ]: return True if ls2 > 2 and str2[0] == and str2[1:].isdigit(): return True if str2.isdigit(): if str1 in CLOUD_LIST: return True if ls1 > 2 and str1.endswith() and str1[:-1].isdigit(): return True if ls2 == 1 and ls1 > 3 and str1[:2].isdigit() and in str1 and str1[3:].isdigit(): return True if str1 in [, ]: return True if str2 == and str1[-1].isdigit() \ and (str1[:5].isdigit() or (str1.startswith() and str1[3:5].isdigit())): return True if str2 == and ls1 >= 6 \ and (str1[:5].isdigit() or (str1.startswith() and str1[3:5].isdigit())) and str1[-1] == : return True if str2 in CLOUD_TRANSLATIONS and str2 not in CLOUD_LIST and ls1 >= 3 and str1[:3] in CLOUD_LIST: return True if str1 in [, ] and (str2.isdigit() or (str2.endswith() and str2[:-1].isdigit())): return True if str1 in [, ] and str2.find() != -1: return True return False
Return True if a space shouldn't exist between two items
371,773
def _count_extra_actions(self, game_image): proportional = self._bonus_tools[] t, l, b, r = proportional.region_in(game_image) token_region = game_image[t:b, l:r] game_h, game_w = game_image.shape[0:2] token_h = int(round(game_h * 27.0 / 960)) token_w = int(round(game_w * 22.0 / 1280)) sizes = (token_h, token_w), finder = v.TemplateFinder(pq_data.extra_action_template, sizes=sizes, acceptable_threshold=0.1, immediate_threshold=0.1) found_tokens = finder.locate_multiple_in(token_region) return len(found_tokens)
Count the number of extra actions for player in this turn.
371,774
def remove_secondary_linked_files(self, file_path=None, relpath=None, mimetype=None, time_origin=None, assoc_with=None): for attrib in self.linked_file_descriptors[:]: if file_path is not None and attrib[] != file_path: continue if relpath is not None and attrib[] != relpath: continue if mimetype is not None and attrib[] != mimetype: continue if time_origin is not None and\ attrib[] != time_origin: continue if assoc_with is not None and\ attrib[] != assoc_with: continue del(self.linked_file_descriptors[ self.linked_file_descriptors.index(attrib)])
Remove all secondary linked files that match all the criteria, criterias that are ``None`` are ignored. :param str file_path: Path of the file. :param str relpath: Relative filepath. :param str mimetype: Mimetype of the file. :param int time_origin: Time origin. :param str ex_from: Extracted from.
371,775
def DeleteOldRuns(self, cutoff_timestamp=None): if cutoff_timestamp is None: raise ValueError("cutoff_timestamp can't be None") return data_store.REL_DB.DeleteOldCronJobRuns( cutoff_timestamp=cutoff_timestamp)
Deletes runs that were started before the timestamp given.
371,776
def getDuration(self): starttime = self.getStartProcessDate() if not starttime: return 0 endtime = self.getDateVerified() or DateTime() duration = (endtime - starttime) * 24 * 60 return duration
Returns the time in minutes taken for this analysis. If the analysis is not yet 'ready to process', returns 0 If the analysis is still in progress (not yet verified), duration = date_verified - date_start_process Otherwise: duration = current_datetime - date_start_process :return: time in minutes taken for this analysis :rtype: int
371,777
def compile_id_list(self, polygon_id_list, nr_of_polygons): def all_equal(iterable): x = None for x in iterable: break for y in iterable: if x != y: return False return True zone_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY) counted_zones = {} for pointer_local, polygon_id in enumerate(polygon_id_list): zone_id = self.id_of(polygon_id) zone_id_list[pointer_local] = zone_id try: counted_zones[zone_id] += 1 except KeyError: counted_zones[zone_id] = 1 if len(counted_zones) == 1: return polygon_id_list, zone_id_list, True if all_equal(list(counted_zones.values())): return polygon_id_list, zone_id_list, False counted_zones_sorted = sorted(list(counted_zones.items()), key=lambda zone: zone[1]) sorted_polygon_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY) sorted_zone_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY) pointer_output = 0 for zone_id, amount in counted_zones_sorted: pointer_local = 0 detected_polygons = 0 while detected_polygons < amount: if zone_id_list[pointer_local] == zone_id: detected_polygons += 1 sorted_polygon_id_list[pointer_output] = polygon_id_list[pointer_local] sorted_zone_id_list[pointer_output] = zone_id pointer_output += 1 pointer_local += 1 return sorted_polygon_id_list, sorted_zone_id_list, False
sorts the polygons_id list from least to most occurrences of the zone ids (->speed up) only 4.8% of all shortcuts include polygons from more than one zone but only for about 0.4% sorting would be beneficial (zones have different frequencies) in most of those cases there are only two types of zones (= entries in counted_zones) and one of them has only one entry. the polygon lists of all single shortcut are already sorted (during compilation of the binary files) sorting should be used for closest_timezone_at(), because only in that use case the polygon lists are quite long (multiple shortcuts are being checked simultaneously). :param polygon_id_list: :param nr_of_polygons: length of polygon_id_list :return: sorted list of polygon_ids, sorted list of zone_ids, boolean: do all entries belong to the same zone
371,778
def _WsdlHasMethod(self, method_name): try: self._method_bindings.get(method_name) return True except ValueError: return False
Determine if a method is in the wsdl. Args: method_name: The name of the method. Returns: True if the method is in the wsdl, otherwise False.
371,779
def get_all_outcome_links_for_context_accounts(self, account_id, outcome_group_style=None, outcome_style=None): path = {} data = {} params = {} path["account_id"] = account_id if outcome_style is not None: params["outcome_style"] = outcome_style if outcome_group_style is not None: params["outcome_group_style"] = outcome_group_style self.logger.debug("GET /api/v1/accounts/{account_id}/outcome_group_links with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/outcome_group_links".format(**path), data=data, params=params, all_pages=True)
Get all outcome links for context.
371,780
def connect(self): "Connect to a host on a given (SSL) port." sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.host, self.port)) boto.log.debug("wrapping ssl socket; CA certificate file=%s", self.ca_certs) self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_certs) cert = self.sock.getpeercert() hostname = self.host.split(, 0)[0] if not ValidateCertificateHostname(cert, hostname): raise InvalidCertificateException(hostname, cert, \ % hostname)
Connect to a host on a given (SSL) port.
371,781
def get_appstruct(self): result = [] for k in self._get_keys(): result.append((k, getattr(self, k))) return result
return list of tuples keys and values corresponding to this model's data
371,782
def _negotiate_SOCKS4(self, dest_addr, dest_port): proxy_type, addr, port, rdns, username, password = self.proxy writer = self.makefile("wb") reader = self.makefile("rb", 0) try: remote_resolve = False try: addr_bytes = socket.inet_aton(dest_addr) except socket.error: writer.flush() resp = self._readall(reader, 8) if resp[0:1] != b"\x00": raise GeneralProxyError("SOCKS4 proxy server sent invalid data") status = ord(resp[1:2]) if status != 0x5A: error = SOCKS4_ERRORS.get(status, "Unknown error") raise SOCKS4Error("{0: self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0]) if remote_resolve: self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port else: self.proxy_peername = dest_addr, dest_port finally: reader.close() writer.close()
Negotiates a connection through a SOCKS4 server.
371,783
def readGraph(edgeList, nodeList = None, directed = False, idKey = , eSource = , eDest = ): progArgs = (0, "Starting to reading graphs") if metaknowledge.VERBOSE_MODE: progKwargs = { : False} else: progKwargs = { : True} with _ProgressBar(*progArgs, **progKwargs) as PBar: if directed: grph = nx.DiGraph() else: grph = nx.Graph() if nodeList: PBar.updateVal(0, "Reading " + nodeList) f = open(os.path.expanduser(os.path.abspath(nodeList))) nFile = csv.DictReader(f) for line in nFile: vals = line ndID = vals[idKey] del vals[idKey] if len(vals) > 0: grph.add_node(ndID, **vals) else: grph.add_node(ndID) f.close() PBar.updateVal(.25, "Reading " + edgeList) f = open(os.path.expanduser(os.path.abspath(edgeList))) eFile = csv.DictReader(f) for line in eFile: vals = line eFrom = vals[eSource] eTo = vals[eDest] del vals[eSource] del vals[eDest] if len(vals) > 0: grph.add_edge(eFrom, eTo, **vals) else: grph.add_edge(eFrom, eTo) PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges()))) f.close() return grph
Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files. This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage. The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) givens the destination and all other columns are attributes of the edges, e.g. weight. The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count. **Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised. **Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes. # Parameters _edgeList_ : `str` > a string giving the path to the edge list file _nodeList_ : `optional [str]` > default `None`, a string giving the path to the node list file _directed_ : `optional [bool]` > default `False`, if `True` the produced network is directed from _eSource_ to _eDest_ _idKey_ : `optional [str]` > default `'ID'`, the name of the ID column in the node list _eSource_ : `optional [str]` > default `'From'`, the name of the source column in the edge list _eDest_ : `optional [str]` > default `'To'`, the name of the destination column in the edge list # Returns `networkx Graph` > the graph described by the input files
371,784
def SensorShare(self, sensor_id, parameters): if not parameters[][]: parameters[].pop() if not parameters[][]: parameters[].pop() if self.__SenseApiCall__("/sensors/{0}/users".format(sensor_id), "POST", parameters = parameters): return True else: self.__error__ = "api call unsuccessful" return False
Share a sensor with a user @param sensor_id (int) - Id of sensor to be shared @param parameters (dictionary) - Additional parameters for the call @return (bool) - Boolean indicating whether the ShareSensor call was successful
371,785
def find_matching_builtin(self, node): for path in EQUIVALENT_ITERATORS.keys(): correct_alias = {path_to_node(path)} if self.aliases[node.func] == correct_alias: return path
Return matched keyword. If the node alias on a correct keyword (and only it), it matches.
371,786
def redact_http_basic_auth(output): url_re = redacted = r if sys.version_info >= (2, 7): return re.sub(url_re, redacted, output, flags=re.IGNORECASE) else: if re.search(url_re, output.lower()): return re.sub(url_re, redacted, output.lower()) return output
Remove HTTP user and password
371,787
def _load_schemas(self): types = (, , , , ) for type in types: schema_path = os.path.join(os.path.split(__file__)[0], % type) self._schema[type] = json.load(open(schema_path)) self._schema[type][][settings.LEVEL_FIELD] = { : 2, : } self._schema[][][][] = \ self.all_sessions() self._schema[][][][] = \ self.all_sessions() terms = [t[] for t in self.metadata[]] self._schema[][][][ ][][][] = terms
load all schemas into schema dict
371,788
def triple_apply(self, triple_apply_fn, mutated_fields, input_fields=None): sourcedestsourcedestdegreedegreedegreedegree__src_id__dst_idcid__idcidcidcidcid assert inspect.isfunction(triple_apply_fn), "Input must be a function" if not (type(mutated_fields) is list or type(mutated_fields) is str): raise TypeError() if not (input_fields is None or type(input_fields) is list or type(input_fields) is str): raise TypeError() if type(mutated_fields) == str: mutated_fields = [mutated_fields] if len(mutated_fields) is 0: raise ValueError() for f in [, , ]: if f in mutated_fields: raise ValueError( % f) all_fields = self.get_fields() if not set(mutated_fields).issubset(set(all_fields)): extra_fields = list(set(mutated_fields).difference(set(all_fields))) raise ValueError( % str(extra_fields)) if input_fields is None: input_fields = self.get_fields() elif type(input_fields) is str: input_fields = [input_fields] input_fields_set = set(input_fields + mutated_fields) input_fields = [x for x in self.get_fields() if x in input_fields_set] g = self.select_fields(input_fields) nativefn = None try: from .. import extensions nativefn = extensions._build_native_function_call(triple_apply_fn) except: pass if nativefn is not None: with cython_context(): return SGraph(_proxy=g.__proxy__.lambda_triple_apply_native(nativefn, mutated_fields)) else: with cython_context(): return SGraph(_proxy=g.__proxy__.lambda_triple_apply(triple_apply_fn, mutated_fields))
Apply a transform function to each edge and its associated source and target vertices in parallel. Each edge is visited once and in parallel. Modification to vertex data is protected by lock. The effect on the returned SGraph is equivalent to the following pseudocode: >>> PARALLEL FOR (source, edge, target) AS triple in G: ... LOCK (triple.source, triple.target) ... (source, edge, target) = triple_apply_fn(triple) ... UNLOCK (triple.source, triple.target) ... END PARALLEL FOR Parameters ---------- triple_apply_fn : function : (dict, dict, dict) -> (dict, dict, dict) The function to apply to each triple of (source_vertex, edge, target_vertex). This function must take as input a tuple of (source_data, edge_data, target_data) and return a tuple of (new_source_data, new_edge_data, new_target_data). All variables in the both tuples must be of dict type. This can also be a toolkit extension function which is compiled as a native shared library using SDK. mutated_fields : list[str] | str Fields that ``triple_apply_fn`` will mutate. Note: columns that are actually mutated by the triple apply function but not specified in ``mutated_fields`` will have undetermined effects. input_fields : list[str] | str, optional Fields that ``triple_apply_fn`` will have access to. The default is ``None``, which grants access to all fields. ``mutated_fields`` will always be included in ``input_fields``. Returns ------- out : SGraph A new SGraph with updated vertex and edge data. Only fields specified in the ``mutated_fields`` parameter are updated. Notes ----- - ``triple_apply`` does not currently support creating new fields in the lambda function. Examples -------- Import turicreate and set up the graph. >>> edges = turicreate.SFrame({'source': range(9), 'dest': range(1, 10)}) >>> g = turicreate.SGraph() >>> g = g.add_edges(edges, src_field='source', dst_field='dest') >>> g.vertices['degree'] = 0 Define the function to apply to each (source_node, edge, target_node) triple. 
>>> def degree_count_fn (src, edge, dst): src['degree'] += 1 dst['degree'] += 1 return (src, edge, dst) Apply the function to the SGraph. >>> g = g.triple_apply(degree_count_fn, mutated_fields=['degree']) Using native toolkit extension function: .. code-block:: c++ #include <turicreate/sdk/toolkit_function_macros.hpp> #include <vector> using namespace turi; std::vector<variant_type> connected_components_parameterized( std::map<std::string, flexible_type>& src, std::map<std::string, flexible_type>& edge, std::map<std::string, flexible_type>& dst, std::string column) { if (src[column] < dst[column]) dst[column] = src[column]; else src[column] = dst[column]; return {to_variant(src), to_variant(edge), to_variant(dst)}; } BEGIN_FUNCTION_REGISTRATION REGISTER_FUNCTION(connected_components_parameterized, "src", "edge", "dst", "column"); END_FUNCTION_REGISTRATION compiled into example.so >>> from example import connected_components_parameterized as cc >>> e = tc.SFrame({'__src_id':[1,2,3,4,5], '__dst_id':[3,1,2,5,4]}) >>> g = tc.SGraph().add_edges(e) >>> g.vertices['cid'] = g.vertices['__id'] >>> for i in range(2): ... g = g.triple_apply(lambda src, edge, dst: cc(src, edge, dst, 'cid'), ['cid'], ['cid']) >>> g.vertices['cid'] dtype: int Rows: 5 [4, 1, 1, 1, 4]
371,789
def _set_switchport_basic(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=switchport_basic.switchport_basic, is_container=, presence=False, yang_name="switchport-basic", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__switchport_basic = t if hasattr(self, ): self._set()
Setter method for switchport_basic, mapped from YANG variable /interface/ethernet/switchport_basic (container) If this variable is read-only (config: false) in the source YANG file, then _set_switchport_basic is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_switchport_basic() directly.
371,790
def get_memory_info(self): rss, vms = _psutil_bsd.get_process_memory_info(self.pid)[:2] return nt_meminfo(rss, vms)
Return a tuple with the process' RSS and VMS size.
371,791
def contains(self, x, ctrs, kdtree=None): return self.overlap(x, ctrs, kdtree=kdtree) > 0
Check if the set of balls contains `x`. Uses a K-D Tree to perform the search if provided.
371,792
def load_edbfile(file=None): import ephem,string,math if file is None: import tkFileDialog try: file=tkFileDialog.askopenfilename() except: return if file is None or file == : return f=open(file) lines=f.readlines() f.close() for line in lines: p=line.split() name=p[0].strip().upper() mpc_objs[name]=ephem.readdb(line) mpc_objs[name].compute() objInfoDict[name]="%6s %6s %6s\n" % ( string.center("a",6), string.center("e",6), string.center("i",6) ) objInfoDict[name]+="%6.2f %6.3f %6.2f\n" % (mpc_objs[name]._a,mpc_objs[name]._e,math.degrees(mpc_objs[name]._inc)) objInfoDict[name]+="%7.2f %7.2f\n" % ( mpc_objs[name].earth_distance, mpc_objs[name].mag) doplot(mpc_objs)
Load the targets from a file
371,793
def to_CAG(self): G = nx.DiGraph() for (name, attrs) in self.nodes(data=True): if attrs["type"] == "variable": for pred_fn in self.predecessors(name): if not any( fn_type in pred_fn for fn_type in ("condition", "decision") ): for pred_var in self.predecessors(pred_fn): G.add_node( self.nodes[pred_var]["basename"], **self.nodes[pred_var], ) G.add_node(attrs["basename"], **attrs) G.add_edge( self.nodes[pred_var]["basename"], attrs["basename"], ) if attrs["is_loop_index"]: G.add_edge(attrs["basename"], attrs["basename"]) return G
Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object. The CAG shows the influence relationships between the variables and elides the function nodes.
371,794
def run_rnaseq_ann_filter(data): data = to_single_data(data) if dd.get_vrn_file(data): eff_file = effects.add_to_vcf(dd.get_vrn_file(data), data)[0] if eff_file: data = dd.set_vrn_file(data, eff_file) ann_file = population.run_vcfanno(dd.get_vrn_file(data), data) if ann_file: data = dd.set_vrn_file(data, ann_file) variantcaller = dd.get_variantcaller(data) if variantcaller and ("gatk-haplotype" in variantcaller): filter_file = variation.gatk_filter_rnaseq(dd.get_vrn_file(data), data) data = dd.set_vrn_file(data, filter_file) vrn_file = dd.get_vrn_file(data) vrn_file = variation.filter_junction_variants(vrn_file, data) data = dd.set_vrn_file(data, vrn_file) return [[data]]
Run RNA-seq annotation and filtering.
371,795
def template(self, lambda_arn, role_arn, output=None, json=False): if not lambda_arn: raise ClickException("Lambda ARN is required to template.") if not role_arn: raise ClickException("Role ARN is required to template.") self.zappa.credentials_arn = role_arn template = self.zappa.create_stack_template( lambda_arn=lambda_arn, lambda_name=self.lambda_name, api_key_required=self.api_key_required, iam_authorization=self.iam_authorization, authorizer=self.authorizer, cors_options=self.cors, description=self.apigateway_description, policy=self.apigateway_policy, endpoint_configuration=self.endpoint_configuration ) if not output: template_file = self.lambda_name + + str(int(time.time())) + else: template_file = output with open(template_file, ) as out: out.write(bytes(template.to_json(indent=None, separators=(,)), "utf-8")) if not json: click.echo(click.style("Template created", fg="green", bold=True) + ": " + click.style(template_file, bold=True)) else: with open(template_file, ) as out: print(out.read())
Only build the template file.
371,796
def parse_sphinx_searchindex(searchindex): if hasattr(searchindex, ): searchindex = searchindex.decode() query = pos = searchindex.find(query) if pos < 0: raise ValueError() sel = _select_block(searchindex[pos:], , ) objects = _parse_dict_recursive(sel) query = pos = searchindex.find(query) if pos < 0: raise ValueError() filenames = searchindex[pos + len(query) + 1:] filenames = filenames[:filenames.find()] filenames = [f.strip() for f in filenames.split()] return filenames, objects
Parse a Sphinx search index Parameters ---------- searchindex : str The Sphinx search index (contents of searchindex.js) Returns ------- filenames : list of str The file names parsed from the search index. objects : dict The objects parsed from the search index.
371,797
def convert(source, ext=COMPLETE, fmt=HTML, dname=None): if dname and not ext & COMPATIBILITY: if os.path.isfile(dname): dname = os.path.abspath(os.path.dirname(dname)) source, _ = _expand_source(source, dname, fmt) _MMD_LIB.markdown_to_string.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_int] _MMD_LIB.markdown_to_string.restype = ctypes.c_char_p src = source.encode() return _MMD_LIB.markdown_to_string(src, ext, fmt).decode()
Converts a string of MultiMarkdown text to the requested format. Transclusion is performed if the COMPATIBILITY extension is not set, and dname is set to a valid directory Keyword arguments: source -- string containing MultiMarkdown text ext -- extension bitfield to pass to conversion process fmt -- flag indicating output format to use dname -- Path to use for transclusion - if None, transclusion functionality is bypassed
371,798
def get_src_or_dst_prompt(mode): _words = {"read": "from", "write": "to"} prompt = "Where would you like to {} your file(s) {}?\n" \ "1. Desktop ({})\n" \ "2. Downloads ({})\n" \ "3. Current ({})\n" \ "4. Browse".format(mode, _words[mode], os.path.expanduser(), os.path.expanduser(), os.getcwd()) return prompt
String together the proper prompt based on the mode :param str mode: "read" or "write" :return str prompt: The prompt needed
371,799
def deleted_records(endpoint): @utils.for_each_value def _deleted_records(self, key, value): deleted_recid = maybe_int(value.get()) if deleted_recid: return get_record_ref(deleted_recid, endpoint) return _deleted_records
Populate the ``deleted_records`` key.