code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def remove_user_permission(rid, uid, action='full'):
    """Remove a user's permission on a given resource.

    :param rid: resource ID
    :type rid: str
    :param uid: user id
    :type uid: str
    :param action: read, write, update, delete or full
    :type action: str
    """
    # Resource IDs may contain slashes; double-escape them for the URL path.
    rid = rid.replace('/', '%252F')
    try:
        acl_url = urljoin(_acl_url(),
                          'acls/{}/users/{}/{}'.format(rid, uid, action))
        response = http.delete(acl_url)
        assert response.status_code == 204
    except DCOSHTTPException as exc:
        # A 400 means the permission did not exist; treat that as success.
        if exc.response.status_code != 400:
            raise
def disconnect_all(self):
    """Disconnect all signals and slots.

    If already in the "disconnected" state, the call is ignored.
    """
    if not self.__connected:
        return
    self.__disconnecting = True
    try:
        for sig in self.__signals:
            sig.disconnect(self.__signalReceived)
        if self.__slot is not None:
            self.__sigDelayed.disconnect(self.__slot)
        self.__connected = False
    finally:
        # Always clear the re-entrancy guard, even if a disconnect raised.
        self.__disconnecting = False
def depth(self) -> int:
    """Depth of the citation scheme.

    .. example:: If we have a Book, Poem, Line system, and the citation
        we are looking at is Poem, depth is 1

    :rtype: int
    :return: Depth of the citation scheme
    """
    # Idiomatic truthiness instead of len(); generator instead of a list
    # so max() does not build an intermediate list.
    if self.children:
        return 1 + max(child.depth for child in self.children)
    return 1
def add_bid(self, bid):
    """Add a bid to the cache.

    :param bid: (price, quantity) pair of strings; a quantity of
        "0.00000000" removes the price level instead of adding it.
    :return: None
    """
    # The original inserted the level and then immediately deleted it when
    # the quantity was zero; test first and avoid the pointless insert.
    if bid[1] == "0.00000000":
        self._bids.pop(bid[0], None)
    else:
        self._bids[bid[0]] = float(bid[1])
def rekey(self, key, nonce=None, recovery_key=False):
    """Enter a single recovery key share to progress the rekey of the Vault.

    If the threshold number of recovery key shares is reached, Vault will
    complete the rekey; otherwise this must be called repeatedly until the
    threshold is met. The nonce must be provided with each call.

    Supported methods:
        PUT: /sys/rekey/update. Produces: 200 application/json
        PUT: /sys/rekey-recovery-key/update. Produces: 200 application/json

    :param key: Specifies a single recovery share key.
    :type key: str | unicode
    :param nonce: Specifies the nonce of the rekey operation.
    :type nonce: str | unicode
    :param recovery_key: If true, send requests to "rekey-recovery-key"
        instead of "rekey" api path.
    :type recovery_key: bool
    :return: The JSON response of the request.
    :rtype: dict
    """
    params = {'key': key}
    if nonce is not None:
        params['nonce'] = nonce
    if recovery_key:
        api_path = '/v1/sys/rekey-recovery-key/update'
    else:
        api_path = '/v1/sys/rekey/update'
    response = self._adapter.put(url=api_path, json=params)
    return response.json()
def element_data_str(z, eldata):
    """Return a string with all data for an element.

    This includes shell and ECP potential data.

    Parameters
    ----------
    z : int or str
        Element Z-number
    eldata : dict
        Data for the element to be printed
    """
    sym = lut.element_sym_from_Z(z, True)
    cs = contraction_string(eldata)
    if cs == '':
        cs = '(no electron shells)'
    s = '\nElement: {} : {}\n'.format(sym, cs)
    for shellidx, shell in enumerate(eldata.get('electron_shells', [])):
        s += electron_shell_str(shell, shellidx) + '\n'
    if 'ecp_potentials' in eldata:
        s += 'ECP: Element: {} Number of electrons: {}\n'.format(
            sym, eldata['ecp_electrons'])
        for pot in eldata['ecp_potentials']:
            s += ecp_pot_str(pot) + '\n'
    return s
def merge_close(events, min_interval, merge_to_longer=False):
    """Merge events that are separated by less than a minimum interval.

    Parameters
    ----------
    events : list of dict
        events with 'start' and 'end' times, from one or several channels.
        **Events must be sorted by their start time.**
    min_interval : float
        minimum delay between consecutive events, in seconds
    merge_to_longer : bool (default: False)
        If True, info (chan, peak, etc.) from the longer of the 2 events is
        kept. Otherwise, info from the earlier onset spindle is kept.

    Returns
    -------
    list of dict
        original events list with close events merged.
    """
    half_iv = min_interval / 2
    out = []
    for current in events:
        if not out:
            out.append(current)
            continue
        previous = out[-1]
        # Events further apart than min_interval stay separate.
        if current['start'] - half_iv > previous['end'] + half_iv:
            out.append(current)
            continue
        if merge_to_longer and (current['end'] - current['start'] >
                                previous['end'] - previous['start']):
            # Keep the longer (current) event's info, extend its start.
            current.update({'start': min(previous['start'], current['start'])})
            out[-1] = current
        else:
            # Keep the earlier event's info, extend its end.
            out[-1].update({'end': max(previous['end'], current['end'])})
    return out
def track_list(self, *args):
    """Return the list of tracks contained in the dataset.

    An optional boolean mask as first positional argument restricts the
    tracks considered.
    """
    if not args:
        return np.unique(self.track)
    return np.unique(self.track.compress(args[0]))
def build_vrt(source_file, destination_file, **kwargs):
    """Make a VRT XML document and write it to a file.

    Parameters
    ----------
    source_file : str, file object or pathlib.Path object
        Source file.
    destination_file : str
        Destination file.
    kwargs : optional
        Additional arguments passed to rasterio.vrt._boundless_vrt_doc

    Returns
    -------
    out : str
        The path to the destination file.
    """
    with rasterio.open(source_file) as src:
        vrt_doc = boundless_vrt_doc(src, **kwargs).tostring()
    with open(destination_file, 'wb') as dst:
        dst.write(vrt_doc)
    return destination_file
def set_auth_request(self, interface_id, address=None):
    """Set the authentication request field for the specified engine.

    Delegates to the interface object, then refreshes the engine.
    """
    self.interface.set_auth_request(interface_id, address)
    self._engine.update()
def parse_kegg_gene_metadata(infile):
    """Parse a KEGG flatfile and return a dictionary of metadata.

    Dictionary keys are: refseq, uniprot, pdbs, taxonomy.

    Args:
        infile: Path to KEGG flatfile

    Returns:
        dict: Dictionary of metadata
    """
    metadata = defaultdict(str)
    with open(infile) as mf:
        kegg_parsed = bs_kegg.parse(mf.read())
    if 'DBLINKS' in kegg_parsed.keys():
        if 'UniProt' in kegg_parsed['DBLINKS']:
            unis = str(kegg_parsed['DBLINKS']['UniProt']).split(' ')
            # Only keep the first UniProt accession when several are listed.
            if isinstance(unis, list):
                metadata['uniprot'] = unis[0]
            else:
                metadata['uniprot'] = unis
        if 'NCBI-ProteinID' in kegg_parsed['DBLINKS']:
            metadata['refseq'] = str(kegg_parsed['DBLINKS']['NCBI-ProteinID'])
    if 'STRUCTURE' in kegg_parsed.keys():
        metadata['pdbs'] = str(kegg_parsed['STRUCTURE']['PDB']).split(' ')
    else:
        metadata['pdbs'] = None
    if 'ORGANISM' in kegg_parsed.keys():
        metadata['taxonomy'] = str(kegg_parsed['ORGANISM'])
    return metadata
def _int2coord(x, y, dim): assert dim >= 1 assert x < dim assert y < dim lng = x / dim * 360 - 180 lat = y / dim * 180 - 90 return lng, lat
Convert x, y values in dim x dim-grid coordinate system into lng, lat values. Parameters: x: int x value of point [0, dim); corresponds to longitude y: int y value of point [0, dim); corresponds to latitude dim: int Number of coding points each x, y value can take. Corresponds to 2^level of the hilbert curve. Returns: Tuple[float, float]: (lng, lat) lng longitude value of coordinate [-180.0, 180.0]; corresponds to X axis lat latitude value of coordinate [-90.0, 90.0]; corresponds to Y axis
def create_cache_settings(self, service_id, version_number, name, action,
                          ttl=None, stale_ttl=None, cache_condition=None):
    """Create a new cache settings object for a service version."""
    body = self._formdata({
        "name": name,
        "action": action,
        "ttl": ttl,
        "stale_ttl": stale_ttl,
        "cache_condition": cache_condition,
    }, FastlyCacheSettings.FIELDS)
    url = "/service/%s/version/%d/cache_settings" % (service_id, version_number)
    content = self._fetch(url, method="POST", body=body)
    return FastlyCacheSettings(self, content)
def partition(self, ref=None, **kwargs):
    """Return a partition in this bundle for a vid reference or name parts.

    :param ref: name, vname, vid or id of the partition to look up
    :param kwargs: name-part query arguments, used when ``ref`` is not given
    :raises NotFoundError: when no partition matches
    """
    from ambry.orm.exc import NotFoundError
    from sqlalchemy.orm.exc import NoResultFound
    if not ref and not kwargs:
        return None
    if ref:
        for p in self.partitions:
            if ref == p.name or ref == p.vname or ref == p.vid or ref == p.id:
                p._bundle = self
                return p
        raise NotFoundError("No partition found for '{}' (a)".format(ref))
    elif kwargs:
        from ..identity import PartitionNameQuery
        pnq = PartitionNameQuery(**kwargs)
        try:
            p = self.partitions._find_orm(pnq).one()
            if p:
                p._bundle = self
                return p
        except NoResultFound:
            raise NotFoundError("No partition found for '{}' (b)".format(kwargs))
def vhat(v1):
    """Find the unit vector along a double precision 3-dimensional vector.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vhat_c.html

    :param v1: Vector to be unitized.
    :type v1: 3-Element Array of floats
    :return: Unit vector v / abs(v).
    :rtype: 3-Element Array of floats
    """
    vin = stypes.toDoubleVector(v1)
    vout = stypes.emptyDoubleVector(3)
    libspice.vhat_c(vin, vout)
    return stypes.cVectorToPython(vout)
def qteEmulateKeypresses(self, keysequence):
    """Emulate the Qt key presses that define ``keysequence``.

    The keys are queued and processed one by one once the event loop is
    idle, i.e. all signals and macros associated with an emulated key
    press execute before the next key is emulated.

    |Args|

    * ``keysequence`` (**QtmacsKeysequence**): the key sequence to emulate.

    |Returns|

    * **None**

    |Raises|

    * **QtmacsArgumentError** if at least one argument has an invalid type.
    """
    keysequence = QtmacsKeysequence(keysequence)
    for event in keysequence.toQKeyEventList():
        self._qteKeyEmulationQueue.append(event)
def _init_cursor(self):
    """Initialise terminal cursor handling.

    The hasattr guards allow running against curses ports that lack
    noecho/cbreak (e.g. windows-curses variants).
    """
    if hasattr(curses, 'noecho'):
        curses.noecho()
    if hasattr(curses, 'cbreak'):
        curses.cbreak()
    self.set_cursor(0)
def calculate_affinity(self, username):
    """Get the affinity between the "base user" and ``username``.

    Returns a namedtuple with the affinity (a Pearson score scaled to a
    percentage, optionally rounded per :attr:`._round`) and the number of
    shared rated anime.

    :param str username: The username to calculate affinity with
    :return: (float affinity, int shared)
    :rtype: tuple
    :raises NoAffinityError: if ten or fewer shared rated anime exist
    """
    scores = self.comparison(username)
    if len(scores) <= 10:
        raise NoAffinityError("Shared rated anime count between "
                              "`{}` and `{}` is less than eleven"
                              .format(self._base_user, username))
    scores1, scores2 = list(zip(*scores.values()))
    pearson = calcs.pearson(scores1, scores2) * 100
    if self._round is not False:
        pearson = round(pearson, self._round)
    return models.Affinity(affinity=pearson, shared=len(scores))
def _operator_handling(self, cursor): values = self._literal_handling(cursor) retval = ''.join([str(val) for val in values]) return retval
Returns a string with the literal that are part of the operation.
def remove_notes(data):
    """Omit notes from a DataFrame object.

    Notes are identified as rows with non-numerical entries in the first
    column (letters other than a scientific-notation ``e-``).

    :param data: DataFrame object to remove notes from
    :type data: Pandas.DataFrame
    :return: DataFrame object with no notes
    :rtype: Pandas.DataFrame
    """
    first_col = data.iloc[:, 0].astype(str)
    has_text = first_col.str.contains('(?!e-)[a-zA-Z]')
    return data.drop(list(has_text.index[has_text]))
def plot_fit(self, **kwargs):
    """Plot the fit of the model against the data.

    :param figsize: optional (width, height) tuple, defaults to (10, 7)
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    figsize = kwargs.get('figsize', (10, 7))
    plt.figure(figsize=figsize)
    date_index = self.index[self.ar:self.data.shape[0]]
    mu, Y = self._model(self.latent_variables.get_z_values())
    plt.plot(date_index, Y, label='Data')
    plt.plot(date_index, mu, label='Filter', c='black')
    plt.title(self.data_name)
    plt.legend(loc=2)
    plt.show()
def add(self, opener):
    """Add an opener to the registry.

    :param opener: Opener object
    :type opener: Opener inherited object
    """
    index = len(self.openers)
    self.openers[index] = opener
    # Every name the opener answers to points back at its slot.
    for name in opener.names:
        self.registry[name] = index
def _setSampleSizeBytes(self): self.sampleSizeBytes = self.getPacketSize() if self.sampleSizeBytes > 0: self.maxBytesPerFifoRead = (32 // self.sampleSizeBytes)
updates the current record of the packet size per sample and the relationship between this and the fifo reads.
def is_null(*symbols):
    """True if no nodes, or all of the given nodes, are either None, NOP
    or empty blocks. For blocks this applies recursively."""
    from symbols.symbol_ import Symbol
    for sym in symbols:
        if sym is None:
            continue
        if not isinstance(sym, Symbol):
            return False
        if sym.token == 'NOP':
            continue
        if sym.token == 'BLOCK':
            # A block is null only if all of its children are null.
            if not is_null(*sym.children):
                return False
            continue
        return False
    return True
def dumps(self):
    """Get rendered tabular text from the table data.

    Only available for text format table writers.

    Returns:
        str: Rendered tabular text.
    """
    saved_stream = self.stream
    try:
        self.stream = six.StringIO()
        self.write_table()
        rendered = self.stream.getvalue()
    finally:
        # Restore the caller's stream even if rendering failed.
        self.stream = saved_stream
    return rendered
def list(ctx, show_hidden, oath_type, period):
    """List all credentials.

    List all credentials stored on your YubiKey.
    """
    ensure_validated(ctx)
    controller = ctx.obj['controller']
    creds = [cred for cred in controller.list()
             if show_hidden or not cred.is_hidden]
    creds.sort()
    for cred in creds:
        click.echo(cred.printable_key, nl=False)
        if oath_type:
            click.echo(u', {}'.format(cred.oath_type.name), nl=False)
        if period:
            click.echo(', {}'.format(cred.period), nl=False)
        click.echo()
def result(retn):
    """Return a value or raise an exception from a retn tuple."""
    ok, valu = retn
    if not ok:
        name, info = valu
        ctor = getattr(s_exc, name, None)
        if ctor is not None:
            raise ctor(**info)
        # Unknown exception name: fall back to the generic SynErr.
        info['errx'] = name
        raise s_exc.SynErr(**info)
    return valu
def from_scanner(self, x, y, z):
    """Convert a 3d position in the scanner reference frame to MRSData space.

    :param x:
    :param y:
    :param z:
    :return: the transformed (x, y, z) position
    :raises ValueError: if no transform is set on the object
    """
    if self.transform is None:
        raise ValueError("No transform set for MRSData object {}".format(self))
    homogeneous = numpy.matrix([x, y, z, 1]).T
    transformed = numpy.linalg.inv(self.transform) * homogeneous
    return numpy.squeeze(numpy.asarray(transformed))[0:3]
def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
    """Create Heroku Connect schema.

    Note:
        This function is only meant to be used for local development.
        In a production environment the schema will be created by
        Heroku Connect.

    Args:
        using (str): Alias for database connection.

    Returns:
        bool: ``True`` if the schema was created, ``False`` if the
            schema already exists.
    """
    connection = connections[using]
    with connection.cursor() as cursor:
        cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
        schema_exists = cursor.fetchone()[0]
        if schema_exists:
            return False
        cursor.execute("CREATE SCHEMA %s;",
                       [AsIs(settings.HEROKU_CONNECT_SCHEMA)])
    with connection.schema_editor() as editor:
        for model in get_heroku_connect_models():
            editor.create_model(model)
        editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')
        from heroku_connect.models import (TriggerLog, TriggerLogArchive)
        for cls in [TriggerLog, TriggerLogArchive]:
            editor.create_model(cls)
    return True
def _proc_cyclic(self): main_axis, rot = max(self.rot_sym, key=lambda v: v[1]) self.sch_symbol = "C{}".format(rot) mirror_type = self._find_mirror(main_axis) if mirror_type == "h": self.sch_symbol += "h" elif mirror_type == "v": self.sch_symbol += "v" elif mirror_type == "": if self.is_valid_op(SymmOp.rotoreflection(main_axis, angle=180 / rot)): self.sch_symbol = "S{}".format(2 * rot)
Handles cyclic group molecules.
def matching(self, packages):
    """Print a message listing matching packages for an unmatched name."""
    print("\nNot found package with the name [ {0}{1}{2} ]. "
          "Matching packages:\nNOTE: Not dependenc"
          "ies are resolved\n".format(self.meta.color["CYAN"],
                                      "".join(packages),
                                      self.meta.color["ENDC"]))
def _client_allowed(self): client_ip = self._client_address[0] if not client_ip in self._settings.allowed_clients and \ not 'ALL' in self._settings.allowed_clients: content = 'Access from host {} forbidden.'.format(client_ip).encode('utf-8') self._send_content(content, 'text/html') return False return True
Check if client is allowed to connect to this server.
def com_google_fonts_check_metadata_subsets_order(family_metadata):
    """METADATA.pb subsets should be alphabetically ordered."""
    expected = list(sorted(family_metadata.subsets))
    if list(family_metadata.subsets) != expected:
        yield FAIL, ("METADATA.pb subsets are not sorted "
                     "in alphabetical order: Got ['{}']"
                     " and expected ['{}']").format(
                         "', '".join(family_metadata.subsets),
                         "', '".join(expected))
    else:
        yield PASS, "METADATA.pb subsets are sorted in alphabetical order."
def get_decision_trees_bulk(self, payload, version=DEFAULT_DECISION_TREE_VERSION):
    """Get a group of decision trees.

    :param list payload: [{"id": agent_id, "timestamp": timestamp}] entries;
        ids must match "a-zA-Z0-9_-", be 1-36 chars and reference existing
        agents; timestamps must be positive non-null integers.
    :param version: version of the tree to get.
    :type version: str or int.
    :return: Decision trees.
    :rtype: list of dict.
    :raises CraftAiBadRequestError: if all ids/timestamps are invalid.
    :raises CraftAiLongRequestTimeOutError: if the API doesn't return the
        trees within the configured timeout.
    """
    headers = self._headers.copy()
    # NOTE(review): `headers` is built but not passed on here — presumably
    # _get_decision_trees_bulk reads it elsewhere; confirm it isn't dead code.
    headers["x-craft-ai-tree-version"] = version
    valid_indices, invalid_indices, invalid_dts = \
        self._check_agent_id_bulk(payload)
    if self._config["decisionTreeRetrievalTimeout"] is False:
        return self._get_decision_trees_bulk(
            payload, valid_indices, invalid_indices, invalid_dts)
    start = current_time_ms()
    while True:
        if current_time_ms() - start > self._config["decisionTreeRetrievalTimeout"]:
            raise CraftAiLongRequestTimeOutError()
        try:
            return self._get_decision_trees_bulk(
                payload, valid_indices, invalid_indices, invalid_dts)
        except CraftAiLongRequestTimeOutError:
            continue
def vectors(self, failed=False):
    """Get vectors in the network.

    failed = { False, True, "all" }

    To get the vectors to/from a specific node, see Node.vectors().
    """
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid vector failed".format(failed))
    if failed == "all":
        return Vector.query.filter_by(network_id=self.id).all()
    return Vector.query.filter_by(network_id=self.id, failed=failed).all()
def alive(opts):
    """Validate and return the connection status with the remote device.

    .. versionadded:: 2018.3.0
    """
    dev = conn()
    thisproxy['conn'].connected = ping()
    if not dev.connected:
        # Tell the master the proxy minion for this host should stop.
        __salt__['event.fire_master']({}, 'junos/proxy/{}/stop'.format(
            opts['proxy']['host']))
    return dev.connected
def sam_list_paired(sam):
    """Get the set of read ids mapped in the sam file(s).

    Requires that both mates of a pair are mapped for the read to be
    reported (consecutive records sharing the id before the '/' suffix).

    :param sam: iterable of open sam file handles (or line iterables)
    :return: set of read ids with both mates mapped
    """
    # Improvements over the original: the unused `pair` variable is gone
    # and reads are collected directly into a set instead of a list that
    # was converted at the end.
    paired = set()
    prev = ''
    for handle in sam:
        for line in handle:
            if line.startswith('@'):
                continue
            fields = line.strip().split()
            read_id, flag = fields[0], int(fields[1])
            # Flags 4/8 mark the segment or its mate as unmapped.
            if flag == 4 or flag == 8:
                continue
            read = read_id.rsplit('/')[0]
            if read == prev:
                paired.add(read)
            prev = read
    return paired
def publish(self, message):
    """Publish the message to all subscribers of this topic.

    :param message: (object), the message to be published.
    """
    message_data = self._to_data(message)
    self._encode_invoke(topic_publish_codec, message=message_data)
def cleanTempDirs(job):
    """Remove temporarily created directories of a succeeded CWL job.

    :param job: the job whose open temp directories should be removed
    """
    # Bug fix: the original tested `job is CWLJob`, which compares the
    # instance against the class object itself and is therefore always
    # False, so the cleanup never ran. isinstance() is what was intended.
    if isinstance(job, CWLJob) and job._succeeded:
        for tempDir in job.openTempDirs:
            if os.path.exists(tempDir):
                shutil.rmtree(tempDir)
        job.openTempDirs = []
def _parse_pg_lscluster(output): cluster_dict = {} for line in output.splitlines(): version, name, port, status, user, datadir, log = ( line.split()) cluster_dict['{0}/{1}'.format(version, name)] = { 'port': int(port), 'status': status, 'user': user, 'datadir': datadir, 'log': log} return cluster_dict
Helper function to parse the output of pg_lscluster
def ensure_permissions(path, user, group, permissions, maxdepth=-1):
    """Ensure permissions for path.

    If path is a file, apply to file and return. If path is a directory,
    apply recursively (if required) to directory contents and return.

    :param user: user name
    :param group: group name
    :param permissions: octal permissions
    :param maxdepth: maximum recursion depth. A negative maxdepth allows
                     infinite recursion and maxdepth=0 means no recursion.
    :returns: None
    """
    if not os.path.exists(path):
        log("File '%s' does not exist - cannot set permissions" % (path),
            level=WARNING)
        return
    _user = pwd.getpwnam(user)
    os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
    os.chmod(path, permissions)
    if maxdepth == 0:
        log("Max recursion depth reached - skipping further recursion",
            level=DEBUG)
        return
    elif maxdepth > 0:
        maxdepth -= 1
    if os.path.isdir(path):
        for entry in glob.glob("%s/*" % (path)):
            ensure_permissions(entry, user=user, group=group,
                               permissions=permissions, maxdepth=maxdepth)
def _handle_delete_file(self, data): file = self.room.filedict.get(data) if file: self.room.filedict = data, None self.conn.enqueue_data("delete_file", file)
Handle files being removed
def previous_layout(pymux, variables):
    """Select the previous layout of the active window."""
    window = pymux.arrangement.get_active_window()
    if window:
        window.select_previous_layout()
def samefile(a: str, b: str) -> bool:
    """Check if two paths represent the same file.

    Falls back to a normalized-path comparison when the filesystem
    cannot answer (e.g. the paths do not exist).
    """
    try:
        same = os.path.samefile(a, b)
    except OSError:
        same = os.path.normpath(a) == os.path.normpath(b)
    return same
def _create_tmp_file(config):
    """Write config to a randomly-named temp file (for inline config / SCP).

    :return: the path of the written file
    """
    rand_fname = py23_compat.text_type(uuid.uuid4())
    filename = os.path.join(tempfile.gettempdir(), rand_fname)
    with open(filename, 'wt') as fobj:
        fobj.write(config)
    return filename
def create(verbose):
    """Create all database tables."""
    click.secho('Creating all tables!', fg='yellow', bold=True)
    with click.progressbar(_db.metadata.sorted_tables) as bar:
        for table in bar:
            if verbose:
                click.echo(' Creating table {0}'.format(table))
            table.create(bind=_db.engine, checkfirst=True)
    create_alembic_version_table()
    click.secho('Created all tables!', fg='green')
# Check all installed packages and build the dependency-map dictionary
# (self.dmap: package name -> list of dependency logs that reference it).
# NOTE(review): the collapsed one-line layout makes the nesting of the
# "if not self.count_pkg" and the trailing "else" ambiguous — confirm
# against the original formatting before reformatting this body.
def data(self): for pkg in self.installed: if os.path.isfile(self.meta.pkg_path + pkg): name = split_package(pkg)[0] for log in self.logs: deps = Utils().read_file(self.dep_path + log) for dep in deps.splitlines(): if name == dep: if name not in self.dmap.keys(): self.dmap[name] = [log] if not self.count_pkg: self.count_pkg = 1 else: self.dmap[name] += [log] self.count_packages()
Check all installed packages and create dictionary database
def _GetPathSegmentIndexForOccurrenceWeights( self, occurrence_weights, value_weights): largest_weight = occurrence_weights.GetLargestWeight() if largest_weight > 0: occurrence_weight_indexes = occurrence_weights.GetIndexesForWeight( largest_weight) number_of_occurrence_indexes = len(occurrence_weight_indexes) else: number_of_occurrence_indexes = 0 path_segment_index = None if number_of_occurrence_indexes == 0: path_segment_index = self._GetPathSegmentIndexForValueWeights( value_weights) elif number_of_occurrence_indexes == 1: path_segment_index = occurrence_weight_indexes[0] else: largest_weight = 0 for occurrence_index in occurrence_weight_indexes: value_weight = value_weights.GetWeightForIndex(occurrence_index) if not path_segment_index or largest_weight < value_weight: largest_weight = value_weight path_segment_index = occurrence_index return path_segment_index
Retrieves the index of the path segment based on occurrence weights. Args: occurrence_weights: the occurrence weights object (instance of _PathSegmentWeights). value_weights: the value weights object (instance of _PathSegmentWeights). Returns: An integer containing the path segment index.
def get_algorithm_config(xs):
    """Flexibly extract algorithm configuration for a sample from any
    function arguments.

    :raises ValueError: if no algorithm configuration can be found
    """
    if isinstance(xs, dict):
        xs = [xs]
    for x in xs:
        if is_std_config_arg(x):
            return x["algorithm"]
        if is_nested_config_arg(x):
            return x["config"]["algorithm"]
        if isinstance(x, (list, tuple)) and is_nested_config_arg(x[0]):
            return x[0]["config"]["algorithm"]
    raise ValueError("Did not find algorithm configuration in items: {0}"
                     .format(pprint.pformat(xs)))
def file_crc32(filename, chunksize=_CHUNKSIZE):
    """Calculate the CRC32 of the contents of filename.

    :param filename: path of the file to checksum
    :param chunksize: read size per iteration
    :return: the CRC32 checksum as an integer
    """
    check = 0
    with open(filename, 'rb') as fd:
        # Bug fix: the sentinel must be b'' (not ''). The file is opened in
        # binary mode, and in Python 3 fd.read() never equals '', so the
        # two-argument iter() never terminated and the loop spun forever
        # once EOF was reached.
        for data in iter(lambda: fd.read(chunksize), b""):
            check = crc32(data, check)
    return check
def _ancestors_or_self(
        self, qname: Union[QualName, bool] = None) -> List[InstanceNode]:
    """XPath - return the list of receiver's ancestors including itself."""
    if qname and self.qual_name != qname:
        res = []
    else:
        res = [self]
    return res + self.up()._ancestors(qname)
def make_workspace(measurement, channel=None, name=None, silence=False):
    """Create a workspace containing the model for a measurement.

    If `channel` is None then include all channels in the model.

    If `silence` is True, then silence HistFactory's output on
    stdout and stderr.
    """
    context = silence_sout_serr if silence else do_nothing
    with context():
        factory = ROOT.RooStats.HistFactory.HistoToWorkspaceFactoryFast(
            measurement)
        if channel is not None:
            workspace = factory.MakeSingleChannelModel(measurement, channel)
        else:
            workspace = factory.MakeCombinedModel(measurement)
        workspace = asrootpy(workspace)
        keepalive(workspace, measurement)
        if name is not None:
            workspace.SetName('workspace_{0}'.format(name))
        return workspace
# Stop the scheduled watchdog task (if any) because the watchdog thread is
# stopping, then clear the task reference. Returns None.
# NOTE(review): the collapsed one-line layout makes the intended nesting of
# "self.__task = None" relative to the inner if ambiguous — confirm against
# the original formatting before reformatting this body.
def thread_stopped(self): if self.__task is not None: if self.__task.stop_event().is_set() is False: self.__task.stop() self.__task = None
Stop scheduled task beacuse of watchdog stop :return: None
def get_boxes_and_lines(ax, labels):
    """Get annotation boxes and lines on *ax* using labels as id."""
    labels_u, labels_u_line = get_labels(labels)
    boxes = ax.findobj(mpl.text.Annotation)
    lines = ax.findobj(mpl.lines.Line2D)
    lineid_boxes = []
    lineid_lines = []
    for box in boxes:
        label = box.get_label()
        try:
            labels_u.index(label)
        except ValueError:
            # Not one of ours; skip.
            continue
        lineid_boxes.append(box)
    for line in lines:
        label = line.get_label()
        try:
            labels_u_line.index(label)
        except ValueError:
            continue
        lineid_lines.append(line)
    return lineid_boxes, lineid_lines
def encode(self, cube_dimensions):
    """Produce a numpy array of integers which encode the supplied
    cube dimensions."""
    values = [getattr(cube_dimensions[d], s)
              for d in self._dimensions
              for s in self._schema]
    return np.asarray(values, dtype=np.int32)
def process_exception(self, request, exception):
    """Log error info when an exception occurred during a request."""
    log_format = self._get_log_format(request)
    if log_format is None:
        return
    params = self._get_parameters_from_request(request, True)
    params['message'] = exception
    params['http_status'] = '-'
    self.OPERATION_LOG.info(log_format, params)
def show_instances(server, cim_class):
    """Display the instances of the CIM class defined by cim_class.

    CIM_RegisteredProfile is read from the server's cached profiles;
    all other classes are enumerated across every namespace.
    """
    if cim_class == 'CIM_RegisteredProfile':
        for inst in server.profiles:
            print(inst.tomof())
        return
    for ns in server.namespaces:
        try:
            insts = server.conn.EnumerateInstances(cim_class, namespace=ns)
            if len(insts):
                print('INSTANCES OF %s ns=%s' % (cim_class, ns))
                for inst in insts:
                    print(inst.tomof())
        except pywbem.Error as er:
            # A class simply missing from a namespace is not an error.
            if er.status_code != pywbem.CIM_ERR_INVALID_CLASS:
                print('%s namespace %s Enumerate failed for conn=%s\n'
                      'exception=%s' % (cim_class, ns, server, er))
def authorize(login, password, scopes, note='', note_url='', client_id='',
              client_secret='', two_factor_callback=None):
    """Obtain an authorization token for the GitHub API.

    :param str login: (required)
    :param str password: (required)
    :param list scopes: (required), areas you want this token to apply to,
        i.e., 'gist', 'user'
    :param str note: (optional), note about the authorization
    :param str note_url: (optional), url for the application
    :param str client_id: (optional), 20 character OAuth client key for
        which to create a token
    :param str client_secret: (optional), 40 character OAuth client secret
        for which to create the token
    :param func two_factor_callback: (optional), function to call when a
        Two-Factor Authentication code needs to be provided by the user.
    :returns: :class:`Authorization <Authorization>`
    """
    gh = GitHub()
    gh.login(two_factor_callback=two_factor_callback)
    return gh.authorize(login, password, scopes, note, note_url,
                        client_id, client_secret)
def _set_subset_indices(self, y_min, y_max, x_min, x_max): y_coords, x_coords = self.xd.lsm.coords dx = self.xd.lsm.dx dy = self.xd.lsm.dy lsm_y_indices_from_y, lsm_x_indices_from_y = \ np.where((y_coords >= (y_min - 2*dy)) & (y_coords <= (y_max + 2*dy))) lsm_y_indices_from_x, lsm_x_indices_from_x = \ np.where((x_coords >= (x_min - 2*dx)) & (x_coords <= (x_max + 2*dx))) lsm_y_indices = np.intersect1d(lsm_y_indices_from_y, lsm_y_indices_from_x) lsm_x_indices = np.intersect1d(lsm_x_indices_from_y, lsm_x_indices_from_x) self.xslice = slice(np.amin(lsm_x_indices), np.amax(lsm_x_indices)+1) self.yslice = slice(np.amin(lsm_y_indices), np.amax(lsm_y_indices)+1)
load subset based on extent
def deduce_helpful_msg(req):
    """Return a helpful message when a requirements file does not exist
    or cannot be parsed.

    :params req: Requirements file path
    """
    if not os.path.exists(req):
        return " File '%s' does not exist." % (req)
    msg = " It does exist."
    try:
        with open(req, 'r') as fp:
            next(parse_requirements(fp.read()))
        msg += " The argument you provided " + \
            "(%s) appears to be a" % (req) + \
            " requirements file. If that is the" + \
            " case, use the '-r' flag to install" + \
            " the packages specified within it."
    except RequirementParseError:
        logger.debug("Cannot parse '%s' as requirements \
file" % (req), exc_info=True)
    return msg
def _register_opt(parser, *args, **kwargs): try: parser.add_option(*args, **kwargs) except (optparse.OptionError, TypeError): parse_from_config = kwargs.pop('parse_from_config', False) option = parser.add_option(*args, **kwargs) if parse_from_config: parser.config_options.append(option.get_opt_string().lstrip('-'))
Handler to register an option for both Flake8 3.x and 2.x. This is based on: https://github.com/PyCQA/flake8/blob/3.0.0b2/docs/source/plugin-development/cross-compatibility.rst#option-handling-on-flake8-2-and-3 It only supports `parse_from_config` from the original function and it uses the `Option` object returned to get the string.
# Returns the modification time of *path*, truncated to whole seconds (the
# decimal part of os.path.getmtime is dropped) but still returned as a float.
def get_path_modified_time(path): return float(foundations.common.get_first_item(str(os.path.getmtime(path)).split(".")))
Returns given path modification time, truncated to whole seconds. :param path: Path. :type path: unicode :return: Modification time. :rtype: float
def chmod(path, mode, recursive=False):
    """Emulate the bash chmod command.

    Sets the file permissions to the specified mode.

    :param path: (str) Full path to the file or directory
    :param mode: (str) Mode to be set (e.g. 0755)
    :param recursive: (bool) Set True to make a recursive call
    :return: int exit code of the chmod command
    :raises CommandError
    """
    log = logging.getLogger(mod_logger + '.chmod')
    # NOTE(review): basestring is Python 2 only — this module appears to
    # target py2; confirm before porting.
    if not isinstance(path, basestring):
        msg = 'path argument is not a string'
        log.error(msg)
        raise CommandError(msg)
    if not isinstance(mode, basestring):
        msg = 'mode argument is not a string'
        log.error(msg)
        raise CommandError(msg)
    if not os.path.exists(path):
        msg = 'Item not found: {p}'.format(p=path)
        log.error(msg)
        raise CommandError(msg)
    command = ['chmod']
    if recursive:
        command.append('-R')
    command.append(mode)
    command.append(path)
    try:
        result = run_command(command)
    except CommandError:
        raise
    log.info('chmod command exited with code: {c}'.format(c=result['code']))
    return result['code']
def areas_of_code(git_enrich, in_conn, out_conn, block_size=100):
    """Build an index for areas of code from a given Perceval RAW index.

    :param block_size: size of items block.
    :param git_enrich: GitEnrich object to deal with SortingHat affiliations.
    :param in_conn: ESPandasConnector to read from.
    :param out_conn: ESPandasConnector to write to.
    :return: number of documents written in ElasticSearch enriched index.
    """
    analyzer = AreasOfCode(in_connector=in_conn, out_connector=out_conn,
                           block_size=block_size, git_enrich=git_enrich)
    return analyzer.analyze()
def range_as_mono(self, start_sample, end_sample):
    """
    Get a range of frames as one combined (mono) channel.

    :param integer start_sample: First frame in range
    :param integer end_sample: Last frame in range (exclusive)
    :returns: Track frames in range as 1 combined channel
    :rtype: 1d numpy array of length ``end_sample - start_sample``
    :raises IOError: if the track is neither mono nor stereo
    """
    saved_position = self.current_frame
    self.current_frame = start_sample
    raw_frames = self.read_frames(end_sample - start_sample)

    if self.channels == 2:
        # Average the two channels down to one.
        mono = np.mean(raw_frames, axis=1)
    elif self.channels == 1:
        mono = raw_frames
    else:
        raise IOError("Input audio must have either 1 or 2 channels")

    # Restore the read position only on success (matches original behavior).
    self.current_frame = saved_position
    return mono
Get a range of frames as 1 combined channel :param integer start_sample: First frame in range :param integer end_sample: Last frame in range (exclusive) :returns: Track frames in range as 1 combined channel :rtype: 1d numpy array of length ``end_sample - start_sample``
def get_contact(self, email):
    """
    Get Filemail contact based on email.

    :param email: address of contact
    :type email: ``str``, ``unicode``
    :rtype: ``dict`` with contact information
    :raises FMBaseError: if no contact matches the email
    """
    for contact in self.get_contacts():
        if contact['email'] == email:
            return contact
    msg = 'No contact with email: "{email}" found.'
    raise FMBaseError(msg.format(email=email))
Get Filemail contact based on email. :param email: address of contact :type email: ``str``, ``unicode`` :rtype: ``dict`` with contact information
def sasqc(self) -> 'SASqc':
    """
    Create a SASqc object which you can use to run various quality-control
    analytics.  See the sasqc.py module.

    :return: sasqc object
    """
    # Load the supporting SAS macros once per session.
    if not self._loaded_macros:
        self._loadmacros()
        self._loaded_macros = True
    return SASqc(self)
This method creates a SASqc object which you can use to run various analytics. See the sasqc.py module. :return: sasqc object
def get_field_visibility_mode(self, field):
    """Returns a (mode, layout) tuple for the given field, where mode is
    "view", "edit" or "hidden", based on the permissions the current user
    has for the context and the field passed in, and layout is the widget
    visibility value for the "header_table" placement.
    """
    # Returned whenever the field must not be rendered at all.
    fallback_mode = ("hidden", "hidden")
    widget = field.widget
    # Placement/visibility of the widget within the header table.
    layout = widget.isVisible(self.context, "header_table")
    if layout in ["invisible", "hidden"]:
        return fallback_mode
    if field.checkPermission("edit", self.context):
        mode = "edit"
        # Sanity check: field-level edit permission without the generic
        # "Modify portal content" permission is suspicious -- warn only.
        sm = getSecurityManager()
        if not sm.checkPermission(ModifyPortalContent, self.context):
            logger.warn("Permission '{}' granted for the edition of '{}', "
                        "but 'Modify portal content' not granted"
                        .format(field.write_permission, field.getName()))
    elif field.checkPermission("view", self.context):
        mode = "view"
    else:
        # Neither edit nor view permission: hide the field.
        return fallback_mode
    # The widget may further restrict visibility for the chosen mode;
    # degrade from "edit" to "view" before giving up entirely.
    if widget.isVisible(self.context, mode, field=field) != "visible":
        if mode == "view":
            return fallback_mode
        mode = "view"
        if widget.isVisible(self.context, mode, field=field) != "visible":
            return fallback_mode
    return (mode, layout)
Returns "view" or "edit" modes, together with the place within where this field has to be rendered, based on the permissions the current user has for the context and the field passed in
def fields_equal(self, instance, fields_to_ignore=("id", "change_date", "changed_by")):
    """
    Compares this instance's fields to the supplied instance to test for
    equality, skipping any field named in ``fields_to_ignore`` and all
    many-to-many fields.

    Args:
        instance: the model instance to compare
        fields_to_ignore: field names that should not be compared for
            equality. By default includes `id`, `change_date`, and
            `changed_by`.

    Returns:
        True if all compared fields are equal, else False
    """
    return all(
        getattr(self, field.name) == getattr(instance, field.name)
        for field in self._meta.get_fields()
        if not field.many_to_many and field.name not in fields_to_ignore
    )
Compares this instance's fields to the supplied instance to test for equality. This will ignore any fields in `fields_to_ignore`. Note that this method ignores many-to-many fields. Args: instance: the model instance to compare fields_to_ignore: List of fields that should not be compared for equality. By default includes `id`, `change_date`, and `changed_by`. Returns: True if the checked fields are all equivalent, else False
def transform_request(self, orig_request, params, method_config):
    """Transforms orig_request to an apiserving request.

    Accepts a rest-style or RPC-style request and returns a new transformed
    request ready to send to the backend, with its path set to the method's
    rosyMethod.

    Args:
      orig_request: An ApiRequest, the original request from the user.
      params: A dictionary containing path parameters for rest requests, or
        None for an RPC request.
      method_config: A dict, the API config of the method to be called.

    Returns:
      An ApiRequest ready to send to the backend.
    """
    request_params = method_config.get('request', {}).get('parameters', {})
    new_request = self.transform_rest_request(orig_request, params,
                                              request_params)
    new_request.path = method_config.get('rosyMethod', '')
    return new_request
Transforms orig_request to apiserving request. This method uses orig_request to determine the currently-pending request and returns a new transformed request ready to send to the backend. This method accepts a rest-style or RPC-style request. Args: orig_request: An ApiRequest, the original request from the user. params: A dictionary containing path parameters for rest requests, or None for an RPC request. method_config: A dict, the API config of the method to be called. Returns: An ApiRequest that's a copy of the current request, modified so it can be sent to the backend. The path is updated and parts of the body or other properties may also be changed.
def _headers(self, **kwargs):
    """Return dict containing base headers for all requests to the server,
    including the auth token (when set) and any caller-supplied overrides."""
    merged = dict(BASE_HEADERS)
    if self._token:
        merged['X-Plex-Token'] = self._token
    merged.update(kwargs)
    return merged
Returns dict containing base headers for all requests to the server.
def is_topic_tail(self):
    """Returns ``True`` if the post is the last post of the topic."""
    last_post = self.topic.last_post
    if not last_post:
        return False
    return last_post.id == self.id
Returns ``True`` if the post is the last post of the topic.
def plot_time_series(sdat, lovs):
    """Plot requested time series.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData
            instance.
        lovs (nested list of str): nested list of series names such as the
            one produced by :func:`stagpy.misc.list_of_vars`.

    Other Parameters:
        conf.time.tstart: the starting time.
        conf.time.tend: the ending time.
    """
    tstart, tend = conf.time.tstart, conf.time.tend
    tseries, times, metas = {}, {}, {}
    for var_name in misc.set_of_vars(lovs):
        series, time, meta = get_time_series(sdat, var_name, tstart, tend)
        tseries[var_name] = series
        metas[var_name] = meta
        if time is not None:
            times[var_name] = time
    # Always fetch the time variable itself (used as the x axis).
    tseries['t'] = get_time_series(sdat, 't', tstart, tend)[0]
    _plot_time_list(sdat, lovs, tseries, metas, times)
Plot requested time series. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. lovs (nested list of str): nested list of series names such as the one produced by :func:`stagpy.misc.list_of_vars`. Other Parameters: conf.time.tstart: the starting time. conf.time.tend: the ending time.
def det_optimal_snrsq(self, det):
    """Returns the optimal SNR squared in the given detector.

    Parameters
    ----------
    det : str
        The name of the detector.

    Returns
    -------
    float :
        The optimal SNR squared.
    """
    attr = '{}_optimal_snrsq'.format(det)
    try:
        return getattr(self._current_stats, attr)
    except AttributeError:
        # Stats are not populated yet; computing the log likelihood
        # ratio fills them in.
        self._loglr()
        return getattr(self._current_stats, attr)
Returns the optimal SNR squared in the given detector. Parameters ---------- det : str The name of the detector. Returns ------- float : The optimal SNR squared.
def send_figure(self, fig, caption=''):
    """
    Render matplotlib figure into an in-memory PNG buffer and then send it
    to the telegram user.

    :param fig: matplotlib figure object.
    :param caption: text caption of picture.
    :return: HTTP status code of the upload request.
    :raises ValueError: if the access token has not been set.
    """
    if not self.is_token_set:
        raise ValueError('TelepythClient: Access token is not set!')

    # Serialize the figure as PNG into a bytes buffer.
    buf = BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)

    # Build the multipart/form-data payload.
    form = MultipartFormData(
        ContentDisposition('caption', caption),
        ContentDisposition('figure', buf, filename="figure.png",
                           content_type='image/png'))

    request = Request(self.base_url + self.access_token, method='POST')
    request.add_header('Content-Type',
                       'multipart/form-data; boundary=%s' % form.boundary)
    request.add_header('User-Agent', __user_agent__ + '/' + __version__)
    request.data = form().read()
    return urlopen(request).getcode()
Render matplotlib figure into temporary bytes buffer and then send it to telegram user. :param fig: matplotlib figure object. :param caption: text caption of picture. :return: status code on error.
def is_in_file_tree(fpath, folder):
    """
    Determine whether the queried folder lies at or below the directory
    that contains the given file (string prefix comparison).

    :param str fpath: filepath to investigate
    :param folder: path to folder to query
    :return bool: whether the path indicated is in the folder indicated
    """
    containing_dir = os.path.split(fpath)[0]
    # Normalize the folder with a trailing separator before comparing.
    normalized_folder = os.path.join(folder, "")
    return normalized_folder.startswith(containing_dir)
Determine whether a file is in a folder. :param str fpath: filepath to investigate :param folder: path to folder to query :return bool: whether the path indicated is in the folder indicated
def retrier(*, max_attempts, sleeptime, max_sleeptime, sleepscale=1.5, jitter=0.2):
    """
    Generator yielding the time to wait for, after each attempt, if it
    failed.  The delay grows geometrically by ``sleepscale`` (capped at
    ``max_sleeptime``) and is perturbed by a uniform jitter of up to
    +/- ``jitter`` seconds.
    """
    assert(max_attempts > 1)
    assert(sleeptime >= 0)
    assert(0 <= jitter <= sleeptime)
    assert(sleepscale >= 1)

    delay = min(max_sleeptime, sleeptime)
    for _ in range(max_attempts):
        # Random perturbation in [-jitter, +jitter], millisecond resolution.
        noise = random.randint(int(-jitter * 1000), int(jitter * 1000)) / 1000
        yield max(0, delay + noise)
        delay = min(max_sleeptime, delay * sleepscale)
Generator yielding time to wait for, after the attempt, if it failed.
def changed(self, src, path, dest):
    """
    Called whenever `path` is changed in the source folder `src`.
    `dest` is the output folder.  Rebuilds the file via ``_build``, logging
    an error (with the path highlighted) if the source is inaccessible.
    """
    full_path = os.path.join(src, path)
    try:
        self._build(src, path, dest, os.path.getmtime(full_path))
    except EnvironmentError as e:
        highlighted = termcolor.colored(path, "yellow", attrs=["bold"])
        logging.error("{0} is inaccessible: {1}".format(highlighted, e.args[0]))
Called whenever `path` is changed in the source folder `src`. `dest` is the output folder. The default implementation calls `build` after determining that the input file is newer than any of the outputs, or any of the outputs does not exist.
def _POUpdateBuilderWrapper(env, target=None, source=_null, **kw):
    """Wrapper for the `POUpdate` builder - make user's life easier.

    When no source is given, falls back to the POT domain: an explicit
    `POTDOMAIN` keyword first, then the construction environment's
    `POTDOMAIN`, then the conventional default 'messages'.
    """
    if source is _null:
        # An explicit POTDOMAIN keyword wins even if its value is falsy.
        if 'POTDOMAIN' in kw:
            domain = kw['POTDOMAIN']
        elif 'POTDOMAIN' in env and env['POTDOMAIN']:
            domain = env['POTDOMAIN']
        else:
            domain = 'messages'
        source = [ domain ]
    return env._POUpdateBuilder(target, source, **kw)
Wrapper for `POUpdate` builder - make user's life easier
def _report_line_to_dict(cls, line):
    """Takes a report line string as input.

    Returns a dict of column name -> value in line, or None if the line
    does not have the expected number of tab-separated columns.
    """
    data = line.split('\t')
    if len(data) != len(report.columns):
        return None

    d = dict(zip(report.columns, data))
    # Numeric columns may contain '.' as a null marker; leave those as the
    # literal string.  Catch only ValueError -- the original bare except
    # also swallowed KeyboardInterrupt/SystemExit.
    for key in report.int_columns:
        try:
            d[key] = int(d[key])
        except ValueError:
            assert d[key] == '.'
    for key in report.float_columns:
        try:
            d[key] = float(d[key])
        except ValueError:
            assert d[key] == '.'
    d['flag'] = flag.Flag(int(d['flag']))
    return d
Takes report line string as input. Returns a dict of column name -> value in line
def _get_id_token_user(token, issuers, audiences, allowed_client_ids, time_now, cache):
    """Get a User for the given id token, if the token is valid.

    Args:
      token: The id_token to check.
      issuers: dict of Issuers.
      audiences: List of audiences that are acceptable, or a mapping of
        issuer key -> acceptable audiences.
      allowed_client_ids: List of client IDs that are acceptable.
      time_now: The current time as a long (eg. long(time.time())).
      cache: Cache to use (eg. the memcache module).

    Returns:
      A User if the token is valid, None otherwise.
    """
    for issuer_key, issuer in issuers.items():
        issuer_cert_uri = convert_jwks_uri(issuer.jwks_uri)
        try:
            parsed_token = _verify_signed_jwt_with_certs(
                token, time_now, cache, cert_uri=issuer_cert_uri)
        except Exception:
            # Signature did not verify against this issuer's certs; try the
            # next issuer.
            _logger.debug(
                'id_token verification failed for issuer %s',
                issuer_key, exc_info=True)
            continue

        issuer_values = _listlike_guard(issuer.issuer, 'issuer', log_warning=False)
        # Bug fix: resolve this issuer's audiences into a local variable.
        # The original rebound the `audiences` parameter inside the loop, so
        # after the first mapping lookup every later issuer was checked
        # against the wrong audience list.
        if isinstance(audiences, _Mapping):
            issuer_audiences = audiences[issuer_key]
        else:
            issuer_audiences = audiences
        if _verify_parsed_token(
                parsed_token, issuer_values, issuer_audiences,
                allowed_client_ids,
                is_legacy_google_auth=(issuer.issuer == _ISSUERS)):
            email = parsed_token['email']
            return users.User(email)
Get a User for the given id token, if the token is valid. Args: token: The id_token to check. issuers: dict of Issuers audiences: List of audiences that are acceptable. allowed_client_ids: List of client IDs that are acceptable. time_now: The current time as a long (eg. long(time.time())). cache: Cache to use (eg. the memcache module). Returns: A User if the token is valid, None otherwise.
def black_tophat(image, radius=None, mask=None, footprint=None):
    """Black tophat filter an image using a circular structuring element.

    image - image in question
    radius - radius of the circular structuring element. If no radius, use
             an 8-connected structuring element.
    mask - mask of significant pixels in the image. Points outside of the
           mask do not participate in the morphological operations and keep
           their original values in the result.
    footprint - custom structuring element (presumably overrides radius --
           TODO confirm against `closing`).
    """
    # Black tophat = morphological closing minus the original image.
    final_image = closing(image, radius, mask, footprint) - image
    # Idiom fix: `mask is not None` instead of `not mask is None`.
    if mask is not None:
        outside = np.logical_not(mask)
        final_image[outside] = image[outside]
    return final_image
Black tophat filter an image using a circular structuring element image - image in question radius - radius of the circular structuring element. If no radius, use an 8-connected structuring element. mask - mask of significant pixels in the image. Points outside of the mask will not participate in the morphological operations
async def open_websocket(url: str, headers: Optional[list] = None,
                         subprotocols: Optional[list] = None):
    """Opens a websocket, yielding the connection and guaranteeing that it
    is closed when the caller is done with it."""
    websocket = await create_websocket(
        url, headers=headers, subprotocols=subprotocols)
    try:
        yield websocket
    finally:
        # Always close, even if the caller's body raised.
        await websocket.close()
Opens a websocket.
def filter(self, intersects):
    """Filter results that intersect a given GeoFeature or Vector.

    Returns a new FeatureCollection containing the features whose geometry
    overlaps or intersects the given shape.
    """
    try:
        crs = self.crs
        vector = intersects.geometry if isinstance(intersects, GeoFeature) else intersects
        # A prepared geometry makes repeated predicate tests much faster.
        prepared = prep(vector.get_shape(crs))
        matches = []
        for feature in self:
            shape = feature.geometry.get_shape(crs)
            if prepared.overlaps(shape) or prepared.intersects(shape):
                matches.append(feature)
    except IndexError:
        # NOTE(review): IndexError presumably comes from an empty
        # collection (no CRS / no features) -- treated as "no matches".
        matches = []
    return FeatureCollection(matches)
Filter results that intersect a given GeoFeature or Vector.
def build_exception_map(cls, tokens):
    """
    Generates a map of line number -> list of (start_col, end_col) ranges
    where trailing slashes are acceptable, specifically within comments
    and strings.
    """
    ranges = defaultdict(list)
    for tok in tokens:
        tok_type, _, (start_row, start_col), (end_row, end_col) = tok[0:4]
        if tok_type not in (tokenize.COMMENT, tokenize.STRING):
            continue
        if start_row == end_row:
            # Token confined to a single line.
            ranges[start_row].append((start_col, end_col))
        else:
            # Multi-line string: first line from its start column to EOL,
            # middle lines entirely, last line up to the end column.
            ranges[start_row].append((start_col, sys.maxsize))
            for row in range(start_row + 1, end_row):
                ranges[row].append((0, sys.maxsize))
            ranges[end_row].append((0, end_col))
    return ranges
Generates a set of ranges where we accept trailing slashes, specifically within comments and strings.
def attribute_name(self, attribute_name):
    """Sets the attribute_name of this CatalogQueryRange.

    The name of the attribute to be searched.

    :param attribute_name: The attribute_name of this CatalogQueryRange.
    :type: str
    :raises ValueError: if the value is None or empty.
    """
    # Reject missing or empty names up front.
    if attribute_name is None:
        raise ValueError("Invalid value for `attribute_name`, must not be `None`")
    if len(attribute_name) < 1:
        raise ValueError("Invalid value for `attribute_name`, length must be greater than or equal to `1`")

    self._attribute_name = attribute_name
Sets the attribute_name of this CatalogQueryRange. The name of the attribute to be searched. :param attribute_name: The attribute_name of this CatalogQueryRange. :type: str
def to_lookup(self, key_selector=identity, value_selector=identity):
    """Returns a Lookup object, using the provided selectors to generate a
    key and a value for each item.

    Note: This method uses immediate execution.

    :raises ValueError: if the Queryable is closed.
    :raises TypeError: if either selector is not callable.
    """
    if self.closed():
        raise ValueError("Attempt to call to_lookup() on a closed Queryable.")

    if not is_callable(key_selector):
        raise TypeError("to_lookup() parameter key_selector={key_selector} is not callable".format(
            key_selector=repr(key_selector)))

    if not is_callable(value_selector):
        raise TypeError("to_lookup() parameter value_selector={value_selector} is not callable".format(
            value_selector=repr(value_selector)))

    pairs = self.select(lambda item: (key_selector(item), value_selector(item)))
    return Lookup(pairs)
Returns a Lookup object, using the provided selector to generate a key for each item. Note: This method uses immediate execution.
def read_prototxt(fname):
    """Return a caffe_pb2.NetParameter object parsed from a prototxt file."""
    net_param = caffe_pb2.NetParameter()
    with open(fname, 'r') as handle:
        text_format.Merge(str(handle.read()), net_param)
    return net_param
Return a caffe_pb2.NetParameter object that defined in a prototxt file
def XanyKX(self):
    """Compute the cross covariance between the "any" effect and the rest.

    Returns an array of shape (P, F_any.shape[1], dof) whose last axis is
    laid out term-by-term in contiguous blocks of
    F[term].shape[1] * A[term].shape[0] columns each.

    NOTE(review): assumes self.Fstar_any, self.D, self.Fstar and self.Astar
    are pre-computed and dimensionally consistent with self.F / self.A --
    confirm against the class initializer.
    """
    result = np.empty((self.P, self.F_any.shape[1], self.dof), order='C')
    for p in range(self.P):
        # Scale the "any" fixed effects by the p-th column of D.
        FanyD = self.Fstar_any * self.D[:, p:p+1]
        start = 0
        for term in range(self.len):
            # Each term fills a contiguous block of the last axis.
            stop = start + self.F[term].shape[1] * self.A[term].shape[0]
            result[p, :, start:stop] = self.XanyKX2_single_p_single_term(
                p=p, F1=FanyD, F2=self.Fstar[term], A2=self.Astar[term])
            start = stop
    return result
compute cross covariance for any and rest
def rcfile(appname, args={}, strip_dashes=True, module_name=None):
    """
    Read environment variables and config files and return them merged with
    the given command-line arguments.

    Arguments:
        appname - application name, used for config file and environment
            variable names (e.g. APPNAME_VAR=1, [appname] sections).
        args - arguments from command line (optparse, docopt, etc).  Note:
            this dict is modified in place when strip_dashes is True.
        strip_dashes - strip dashes prefixing key names from args dict.
        module_name - config section name; defaults to appname.

    Returns:
        dict containing the merged variables of environment variables,
        config files and args.
    """
    if strip_dashes:
        # Iterate over a snapshot of the keys: mutating a dict while
        # iterating its live key view raises RuntimeError on Python 3.
        for k in list(args.keys()):
            args[k.lstrip('-')] = args.pop(k)

    environ = get_environment(appname)

    if not module_name:
        module_name = appname

    config = get_config(appname, module_name, args.get('config', ''))
    return merge(merge(args, config), environ)
Read environment variables and config files and return them merged with predefined list of arguments. Arguments: appname - application name, used for config files and environemnt variable names. args - arguments from command line (optparse, docopt, etc). strip_dashes - strip dashes prefixing key names from args dict. Returns: dict containing the merged variables of environment variables, config files and args. Environment variables are read if they start with appname in uppercase with underscore, for example: TEST_VAR=1 Config files compatible with ConfigParser are read and the section name appname is read, example: [appname] var=1 Files are read from: /etc/appname/config, /etc/appfilerc, ~/.config/appname/config, ~/.config/appname, ~/.appname/config, ~/.appnamerc, .appnamerc, file provided by config variable in args. Example usage with docopt: args = rcfile(__name__, docopt(__doc__, version=__version__))
def do_startInstance(self, args):
    """Start specified instance, given either as an index into the scaling
    group's instance list or as an instance id."""
    parser = CommandArgumentParser("startInstance")
    parser.add_argument(dest='instance', help='instance index or name')
    args = vars(parser.parse_args(args))

    # Bug fix: the original read args['force'], which the parser never
    # defines, so every invocation raised KeyError.
    instanceId = args['instance']
    try:
        # Numeric argument: treat it as an index into the scaling group.
        index = int(instanceId)
        instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']
        instanceId = instances[index]['InstanceId']
    except ValueError:
        # Non-numeric: assume the argument is already an instance id.  The
        # original passed the bare string on and then indexed it with
        # ['InstanceId'], which raised TypeError.
        pass

    client = AwsConnectionFactory.getEc2Client()
    client.start_instances(InstanceIds=[instanceId])
Start specified instance
def execute_specific(self, p_todo):
    """Actions specific to this command: handle recurrence, run the
    subclass hook, then print the resulting todo."""
    self._handle_recurrence(p_todo)
    self.execute_specific_core(p_todo)
    self.out(self.prefix() + PrettyPrinter().print_todo(p_todo))
Actions specific to this command.
def getNewQuery(connection=None, commitOnEnd=False, *args, **kargs):
    """
    Create a new PySQLQuery object.

    @param connection: existing connection object to reuse; when None a new
        connection is created from the remaining arguments.
    @param commitOnEnd: Default False. When True, auto commit once the
        query completes (one time only).
    @author: Nick Verbeck
    @since: 5/12/2008
    @updated: 7/19/2008 - Added commitOnEnd support
    """
    if connection is None:
        connection = getNewConnection(*args, **kargs)
    return query.PySQLQuery(connection, commitOnEnd=commitOnEnd)
Create a new PySQLQuery Class @param connection: Connection Object representing your connection string @param commitOnEnd: Default False, When query is complete do you wish to auto commit. This is a one time auto commit @author: Nick Verbeck @since: 5/12/2008 @updated: 7/19/2008 - Added commitOnEnd support
def export_img(visio_filename, image_filename, pagenum=None, pagename=None):
    """Exports images of pages from a visio file.

    When several pages match, each page is written to a numbered variant of
    ``image_filename``.

    :raises IOError: if the target directory does not exist or export fails.
    """
    image_pathname = os.path.abspath(image_filename)
    if not os.path.isdir(os.path.dirname(image_pathname)):
        raise IOError('Could not write image file: %s' % image_filename)

    with VisioFile.Open(visio_filename) as visio:
        pages = filter_pages(visio.pages, pagenum, pagename)
        try:
            if len(pages) == 1:
                pages[0].Export(image_pathname)
            else:
                # Zero-pad the page number so exported filenames sort
                # naturally.
                digits = int(log(len(pages), 10)) + 1
                basename, ext = os.path.splitext(image_pathname)
                template = "%s%%0%dd%s" % (basename, digits, ext)
                for index, page in enumerate(pages, start=1):
                    page.Export(template % index)
        except Exception:
            raise IOError('Could not write image: %s' % image_pathname)
Exports images from visio file
def _unfocus(self, event):
    """Unfocus palette items when clicking on the bar or square."""
    focused = self.focus_get()
    # Keep focus on spinbox/entry widgets; otherwise grab it back.
    if focused != self and 'spinbox' not in str(focused) and 'entry' not in str(focused):
        self.focus_set()
Unfocus palette items when click on bar or square.
def generate(self, x, **kwargs):
    """
    Return a tensor that constructs adversarial examples for the given
    input. Generate uses tf.py_func in order to operate over tensors.

    :param x: A tensor with the inputs.
    :param kwargs: See `parse_params`
    """
    # A live session is required because the attack itself runs in numpy
    # via tf.py_func rather than as a pure graph operation.
    assert self.sess is not None, \
        'Cannot use `generate` when no `sess` was provided'
    self.parse_params(**kwargs)

    labels, nb_classes = self.get_or_guess_labels(x, kwargs)

    # Build the attack object from the parsed parameters; targeted mode is
    # enabled when the caller supplied 'y_target'.
    attack = CWL2(self.sess, self.model, self.batch_size, self.confidence,
                  'y_target' in kwargs, self.learning_rate,
                  self.binary_search_steps, self.max_iterations,
                  self.abort_early, self.initial_const, self.clip_min,
                  self.clip_max, nb_classes,
                  x.get_shape().as_list()[1:])

    def cw_wrap(x_val, y_val):
        # Runs the numpy-based attack; invoked by tf.py_func at graph
        # execution time.
        return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)

    wrap = tf.py_func(cw_wrap, [x, labels], self.tf_dtype)
    # py_func loses static shape information; restore it from the input.
    wrap.set_shape(x.get_shape())

    return wrap
Return a tensor that constructs adversarial examples for the given input. Generate uses tf.py_func in order to operate over tensors. :param x: A tensor with the inputs. :param kwargs: See `parse_params`
def dump_edn_val(v):
    """edn simple value dump: strings are JSON-quoted, E instances are
    rendered as unicode, everything else goes through dumps()."""
    if isinstance(v, (str, unicode)):
        return json.dumps(v)
    if isinstance(v, E):
        return unicode(v)
    return dumps(v)
edn simple value dump
def _MakeFileDescriptorProto(proto_file_name, full_name, field_items):
    """Populate FileDescriptorProto for MessageFactory's DescriptorPool."""
    package, message_name = full_name.rsplit('.', 1)

    file_proto = descriptor_pb2.FileDescriptorProto()
    file_proto.name = os.path.join(package.replace('.', '/'), proto_file_name)
    file_proto.package = package

    message_proto = file_proto.message_type.add()
    message_proto.name = message_name

    # Field numbers are assigned sequentially starting at 1.
    for number, (field_name, field_type) in enumerate(field_items, 1):
        field = message_proto.field.add()
        field.name = field_name
        field.number = number
        field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
        field.type = field_type

    return file_proto
Populate FileDescriptorProto for MessageFactory's DescriptorPool.
def export_distributions(region=None, key=None, keyid=None, profile=None):
    """
    Get details of all CloudFront distributions.
    Produces results that can be used to create an SLS file.

    CLI Example:

    .. code-block:: bash

        salt-call boto_cloudfront.export_distributions --out=txt |\\
            sed "s/local: //" > cloudfront_distributions.sls
    """
    results = OrderedDict()
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        for name, distribution in _list_distributions(
            conn,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        ):
            config = distribution['distribution']['DistributionConfig']
            tags = distribution['tags']
            distribution_sls_data = [
                {'name': name},
                {'config': config},
                {'tags': tags},
            ]
            results['Manage CloudFront distribution {0}'.format(name)] = {
                'boto_cloudfront.present': distribution_sls_data,
            }
    except botocore.exceptions.ClientError:
        # Bug fix: bare `raise` re-raises with the original traceback
        # intact (`raise err` truncated it on Python 2).
        raise
    dumper = __utils__['yaml.get_dumper']('IndentedSafeOrderedDumper')
    return __utils__['yaml.dump'](
        results,
        default_flow_style=False,
        Dumper=dumper,
    )
Get details of all CloudFront distributions. Produces results that can be used to create an SLS file. CLI Example: .. code-block:: bash salt-call boto_cloudfront.export_distributions --out=txt |\ sed "s/local: //" > cloudfront_distributions.sls
def prepare_model(self, sess, allow_initialize=True):
    """Initialize the model and, if necessary, launch the queue runners.

    Followers wait for the leader to finish initialization instead of
    initializing themselves.
    """
    if self._follower:
        self.wait_for_initialization()
    else:
        self._init_model(sess, allow_initialize)

    # Nothing else to do when the session has not changed.
    if sess is self._sess:
        return
    if self.threads:
        raise ValueError('You must call stop_queues() before '
                         'starting a new session with QueueRunners.')
    self._sess = sess
    self._start_threads(sess)
Initialize the model and if necessary launch the queue runners.