code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def session_list(consul_url=None, token=None, return_list=False, **kwargs):
    """List Consul sessions.

    :param consul_url: The Consul server URL (falls back to config).
    :param token: Consul ACL token.
    :param return_list: When True, return only the session IDs.
    :param dc: Optional datacenter override, passed via kwargs.
    :return: The raw query result, a list of session IDs, or an error dict.
    """
    if not consul_url:
        consul_url = _get_config()
    if not consul_url:
        log.error('No Consul URL found.')
        return {'message': 'No Consul URL found.', 'res': False}
    query_params = {}
    if 'dc' in kwargs:
        query_params['dc'] = kwargs['dc']
    ret = _query(consul_url=consul_url,
                 function='session/list',
                 token=token,
                 query_params=query_params)
    if return_list:
        return [item['ID'] for item in ret['data']]
    return ret
Used to list sessions. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param return_list: By default, all information about the sessions is returned, using the return_list parameter will return a list of session IDs. :return: A list of all available sessions. CLI Example: .. code-block:: bash salt '*' consul.session_list
def get_product_string(self):
    """Read the Product String from the HID device.

    :return: the Product String
    :rtype: unicode
    :raises IOError: when the underlying hidapi call fails.
    """
    self._check_device_status()
    buf = ffi.new("wchar_t[]", 255)
    status = hidapi.hid_get_product_string(self._device, buf, 255)
    if status == -1:
        raise IOError(
            "Failed to read product string from HID device: {0}".format(
                self._get_last_error_string()))
    return ffi.string(buf)
Get the Product String from the HID device. :return: The Product String :rtype: unicode
def register_actor(name, actor_handle):
    """Register a named actor under a string key.

    Args:
        name: The name of the named actor.
        actor_handle: The actor object to be associated with this name.

    Raises:
        TypeError: if the arguments have the wrong types.
        ValueError: if an actor is already registered under ``name``.
    """
    if not isinstance(name, str):
        raise TypeError("The name argument must be a string.")
    if not isinstance(actor_handle, ray.actor.ActorHandle):
        raise TypeError("The actor_handle argument must be an ActorHandle "
                        "object.")
    key = _calculate_key(name)
    pickled = pickle.dumps(actor_handle)
    if _internal_kv_put(key, pickled):
        # Roll back the handle bookkeeping before reporting the clash.
        actor_handle._ray_new_actor_handles.pop()
        raise ValueError(
            "Error: the actor with name={} already exists".format(name))
Register a named actor under a string key. Args: name: The name of the named actor. actor_handle: The actor object to be associated with this name
def ls(manager: Manager, url: Optional[str], namespace_id: Optional[int]):
    """List cached namespaces, by URL or by numeric identifier."""
    if url:
        namespace = manager.get_or_create_namespace(url)
        if not isinstance(namespace, Namespace):
            click.echo('uncachable namespace')
        else:
            _page(namespace.entries)
    elif namespace_id is not None:
        _ls(manager, Namespace, namespace_id)
    else:
        click.echo('Missing argument -i or -u')
List cached namespaces.
def assertEqual(first, second, message=None):
    """Assert that ``first`` equals ``second``.

    :param first: First part to evaluate.
    :param second: Second part to evaluate.
    :param message: Failure message.
    :raises TestStepFail: if the values are not equal.
    """
    if first == second:
        return
    if message is not None:
        raise TestStepFail(format_message(message))
    raise TestStepFail("Assert: %s != %s" % (str(first), str(second)))
Assert that first equals second. :param first: First part to evaluate :param second: Second part to evaluate :param message: Failure message :raises: TestStepFail if not first == second
def open(self) -> bool:
    """True while the connection is usable (OPEN and still transferring).

    When this is False, using the connection raises ConnectionClosed.
    """
    if self.state is not State.OPEN:
        return False
    return not self.transfer_data_task.done()
This property is ``True`` when the connection is usable. It may be used to detect disconnections but this is discouraged per the EAFP_ principle. When ``open`` is ``False``, using the connection raises a :exc:`~websockets.exceptions.ConnectionClosed` exception. .. _EAFP: https://docs.python.org/3/glossary.html#term-eafp
def isUrl(urlString):
    """Return True if ``urlString`` looks like a valid http/ftp URL.

    Validity requires both a scheme and a netloc per the urlparse module
    and a match against a permissive URL regex (scheme, hostname or IP,
    optional port, optional path). Adapted from
    http://stackoverflow.com/questions/7160737/

    :param urlString: candidate URL string.
    :returns: bool. (Previously a truthy ``re.Match`` object was
        returned on success; wrapping in ``bool()`` keeps all truthiness
        callers working while giving a consistent return type.)
    """
    parsed = urlparse.urlparse(urlString)
    urlparseValid = parsed.netloc != '' and parsed.scheme != ''
    regex = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
        r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return bool(regex.match(urlString)) and urlparseValid
Attempts to return whether a given URL string is valid by checking for the presence of the URL scheme and netloc using the urlparse module, and then using a regex. From http://stackoverflow.com/questions/7160737/
def modver(self, *args):
    """Switch the colour of the verify button to reflect target status."""
    g = get_root(self).globals
    if not self.ok():
        self.verify.config(bg=g.COL['main'])
        self.verify.config(state='disable')
    else:
        tname = self.val.get()
        if tname in self.successes:
            colour = g.COL['start']
        elif tname in self.failures:
            colour = g.COL['stop']
        else:
            colour = g.COL['main']
        self.verify.config(bg=colour)
        self.verify.config(state='normal')
    if self.callback is not None:
        self.callback()
Switches colour of verify button
def get_direct_band_gap(self):
    """Return the direct band gap (0.0 for metals).

    Returns:
        the value of the direct band gap
    """
    if self.is_metal():
        return 0.0
    gap_dict = self.get_direct_band_gap_dict()
    return min(entry['value'] for entry in gap_dict.values())
Returns the direct band gap. Returns: the value of the direct band gap
def find_fields(self, classname=".*", fieldname=".*", fieldtype=".*",
                accessflags=".*"):
    """Find fields by regex.

    :param classname: regular expression for the class name
    :param fieldname: regular expression for the field name
    :param fieldtype: regular expression for the field type
    :param accessflags: regular expression for the access flags
    :rtype: generator of `FieldClassAnalysis`
    """
    for cname, klass in self.classes.items():
        if not re.match(classname, cname):
            continue
        for analysis in klass.get_fields():
            field = analysis.get_field()
            if (re.match(fieldname, field.get_name())
                    and re.match(fieldtype, field.get_descriptor())
                    and re.match(accessflags,
                                 field.get_access_flags_string())):
                yield analysis
find fields by regex :param classname: regular expression of the classname :param fieldname: regular expression of the fieldname :param fieldtype: regular expression of the fieldtype :param accessflags: regular expression of the access flags :rtype: generator of `FieldClassAnalysis`
def CallNtpdate(logger):
    """Sync the system clock using ntpdate.

    Stops ntpd first (if it is running) so ntpdate can bind the NTP
    port, syncs against the servers listed in /etc/ntp.conf, then
    restarts ntpd.

    Args:
        logger: logger object, used to write to SysLog and serial port.
    """
    # 'service status' exits 0 when running, non-zero when inactive.
    ntpd_inactive = subprocess.call(['service', 'ntpd', 'status'])
    try:
        if not ntpd_inactive:
            subprocess.check_call(['service', 'ntpd', 'stop'])
        # shell=True so the backtick awk substitution extracts the
        # configured server hostnames from /etc/ntp.conf.
        subprocess.check_call(
            'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`',
            shell=True)
        if not ntpd_inactive:
            subprocess.check_call(['service', 'ntpd', 'start'])
    except subprocess.CalledProcessError:
        logger.warning('Failed to sync system time with ntp server.')
    else:
        logger.info('Synced system time with ntp server.')
Sync clock using ntpdate. Args: logger: logger object, used to write to SysLog and serial port.
def from_row(row):
    """Create an Advice record from a CSV row."""
    raw_subject = row[5]
    # Capitalise only the first character, leaving the rest untouched.
    subject = (raw_subject[0].upper() + raw_subject[1:]) if raw_subject \
        else raw_subject
    return Advice.objects.create(
        id=row[0],
        administration=cleanup(row[1]),
        type=row[2],
        session=datetime.strptime(row[4], '%d/%m/%Y'),
        subject=cleanup(subject),
        topics=[topic.title() for topic in cleanup(row[6]).split(', ')],
        tags=[tag.strip() for tag in row[7].split(',') if tag.strip()],
        meanings=cleanup(row[8]).replace(' / ', '/').split(', '),
        part=_part(row[9]),
        content=cleanup(row[10]),
    )
Create an advice from a CSV row
def monitors(self):
    """Return a new raw REST interface to monitors resources.

    :rtype: :py:class:`ns1.rest.monitoring.Monitors`
    """
    # Imported lazily so the REST module is only loaded on first use.
    import ns1.rest.monitoring as monitoring
    return monitoring.Monitors(self.config)
Return a new raw REST interface to monitors resources :rtype: :py:class:`ns1.rest.monitoring.Monitors`
def isConnected(self, fromName, toName):
    """Return 1 if a connection runs fromName -> toName, else 0."""
    found = any(conn.fromLayer.name == fromName
                and conn.toLayer.name == toName
                for conn in self.connections)
    return 1 if found else 0
Are these two layers connected this way?
def find_needed_input(input_format):
    """Find a needed input class.

    input_format - needed input format, see utils.input.dataformats

    Returns the first registered input class whose ``input_format``
    matches, or None when no input entry in the registry accepts it.

    NOTE: the original loop variable was named ``re``, shadowing the
    regex module name; renamed to ``entry`` for clarity.
    """
    needed_inputs = [
        entry.cls for entry in registry
        if entry.category == RegistryCategories.inputs
        and entry.cls.input_format == input_format
    ]
    if needed_inputs:
        return needed_inputs[0]
    return None
Find a needed input class input_format - needed input format, see utils.input.dataformats
def register_instances(name, instances, region=None, key=None, keyid=None,
                       profile=None):
    """Register instances with an ELB.

    ``instances`` is either a string instance id or a list of ids.
    Returns True when every requested instance ends up registered,
    otherwise False.
    """
    if isinstance(instances, six.string_types) or isinstance(
            instances, six.text_type):
        instances = [instances]
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        registered_instances = conn.register_instances(name, instances)
    except boto.exception.BotoServerError as error:
        log.warning(error)
        return False
    registered_ids = set(instance.id for instance in registered_instances)
    register_failures = set(instances) - registered_ids
    if register_failures:
        log.warning('Instance(s): %s not registered with ELB %s.',
                    list(register_failures), name)
        return False
    return True
Register instances with an ELB. Instances is either a string instance id or a list of string instance id's. Returns: - ``True``: instance(s) registered successfully - ``False``: instance(s) failed to be registered CLI example: .. code-block:: bash salt myminion boto_elb.register_instances myelb instance_id salt myminion boto_elb.register_instances myelb "[instance_id,instance_id]"
def GetSystemConfigurationArtifact(self, session_identifier=CURRENT_SESSION):
    """Retrieves the knowledge base as a system configuration artifact.

    Args:
        session_identifier (Optional[str]): session identifier, where
            CURRENT_SESSION represents the active session.

    Returns:
        SystemConfigurationArtifact: system configuration artifact.
    """
    system_configuration = artifacts.SystemConfigurationArtifact()
    system_configuration.code_page = self.GetValue(
        'codepage', default_value=self._codepage)
    # Hostname and user accounts are tracked per session.
    system_configuration.hostname = self._hostnames.get(
        session_identifier, None)
    system_configuration.keyboard_layout = self.GetValue('keyboard_layout')
    system_configuration.operating_system = self.GetValue('operating_system')
    system_configuration.operating_system_product = self.GetValue(
        'operating_system_product')
    system_configuration.operating_system_version = self.GetValue(
        'operating_system_version')
    # A fixed date is used only to resolve the time zone name, since
    # tzname() can depend on DST at the supplied date.
    date_time = datetime.datetime(2017, 1, 1)
    time_zone = self._time_zone.tzname(date_time)
    if time_zone and isinstance(time_zone, py2to3.BYTES_TYPE):
        # On Python 2 tzname may return bytes; normalize to a string.
        time_zone = time_zone.decode('ascii')
    system_configuration.time_zone = time_zone
    user_accounts = self._user_accounts.get(session_identifier, {})
    system_configuration.user_accounts = list(user_accounts.values())
    return system_configuration
Retrieves the knowledge base as a system configuration artifact. Args: session_identifier (Optional[str])): session identifier, where CURRENT_SESSION represents the active session. Returns: SystemConfigurationArtifact: system configuration artifact.
def get_crop_size(crop_w, crop_h, image_w, image_h):
    """Pick the scaled (width, height) that covers the crop window.

    Scales the image so one crop dimension matches exactly; returns the
    scaling whose other dimension is at least as large as requested.
    """
    width_ratio = float(crop_w) / float(image_w)
    height_ratio = float(crop_h) / float(image_h)
    by_width = (crop_w, int(round(image_h * width_ratio)))
    by_height = (int(round(image_w * height_ratio)), crop_h)
    return by_width if by_width[1] > crop_h else by_height
Determines the correct scale size for the image. When scaling so that img w == crop w yields img h > crop h, use those dimensions; otherwise use the dimensions from scaling so that img h == crop h (which gives img w >= crop w).
def galprop_gasmap(self, **kwargs):
    """Return the file name for Galprop input gasmaps."""
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    localpath = NameFactory.galprop_gasmap_format.format(**merged)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
return the file name for Galprop input gasmaps
def reload(self):
    """Reload the interface list.

    Clears cached interface data, reloads the WinPcap bindings when
    enabled, re-runs discovery and resets the default working interface.
    """
    self.restarted_adapter = False
    self.data.clear()
    if conf.use_winpcapy:
        # Imported here so the pcap bindings are only pulled in when the
        # WinPcap backend is actually enabled.
        from scapy.arch.pcapdnet import load_winpcapy
        load_winpcapy()
    self.load()
    conf.iface = get_working_if()
Reload interface list
def to_json(self):
    """Return the JSON representation of the resource."""
    linked_fields = ['space', 'content_type', 'created_by', 'updated_by',
                     'published_by']
    date_fields = ['created_at', 'updated_at', 'deleted_at',
                   'first_published_at', 'published_at', 'expires_at']
    sys_json = {}
    for key, value in self.sys.items():
        if key in linked_fields:
            value = value.to_json()
        if key in date_fields:
            value = value.isoformat()
        sys_json[camel_case(key)] = value
    return {'sys': sys_json}
Returns the JSON representation of the resource.
def deleteVertex(self, document, waitForSync=False):
    """Delete a vertex from the graph as well as all linked edges.

    :raises DeletionError: when the server does not answer 200/202.
    """
    url = "%s/vertex/%s" % (self.URL, document._id)
    response = self.connection.session.delete(
        url, params={'waitForSync': waitForSync})
    data = response.json()
    if response.status_code in (200, 202):
        return True
    raise DeletionError("Unable to delete vertice, %s" % document._id, data)
Deletes a vertex from the graph as well as all linked edges.
def add_middleware(self, middleware, *, before=None, after=None):
    """Add a middleware object to this broker.

    Appended to the end of the middleware list by default; ``before``
    or ``after`` may name a registered middleware class to anchor on.

    Parameters:
        middleware(Middleware): The middleware.
        before(type): Add this middleware before a specific one.
        after(type): Add this middleware after a specific one.

    Raises:
        ValueError: When ``before`` or ``after`` refer to a middleware
            that hasn't been registered yet.
    """
    assert not (before and after), \
        "provide either 'before' or 'after', but not both"
    if before or after:
        # Locate the anchor middleware; the for/else raises when the
        # class is not present in the current middleware list.
        for i, m in enumerate(self.middleware):
            if isinstance(m, before or after):
                break
        else:
            raise ValueError("Middleware %r not found" % (before or after))
        if before:
            self.middleware.insert(i, middleware)
        else:
            self.middleware.insert(i + 1, middleware)
    else:
        self.middleware.append(middleware)
    # Merge the middleware's actor options and replay the declaration
    # hooks for everything already declared on this broker.
    self.actor_options |= middleware.actor_options
    for actor_name in self.get_declared_actors():
        middleware.after_declare_actor(self, actor_name)
    for queue_name in self.get_declared_queues():
        middleware.after_declare_queue(self, queue_name)
    for queue_name in self.get_declared_delay_queues():
        middleware.after_declare_delay_queue(self, queue_name)
Add a middleware object to this broker. The middleware is appended to the end of the middleware list by default. You can specify another middleware (by class) as a reference point for where the new middleware should be added. Parameters: middleware(Middleware): The middleware. before(type): Add this middleware before a specific one. after(type): Add this middleware after a specific one. Raises: ValueError: When either ``before`` or ``after`` refer to a middleware that hasn't been registered yet.
def new(namespace, name, protected=False, attributes=None,
        api_url=fapi.PROD_API_ROOT):
    """Create a new FireCloud workspace.

    Args:
        namespace (str): Workspace namespace (billing project).
        name (str): Workspace name.
        protected (bool): Whether to create a protected workspace.
        attributes (dict): Initial workspace attributes; defaults to an
            empty dict. (The original used a mutable ``dict()`` default
            argument, which is shared across calls; replaced with the
            None-sentinel idiom.)
        api_url (str): FireCloud API root.

    Returns:
        Workspace: A new FireCloud workspace.

    Raises:
        FireCloudServerError: API call failed.
    """
    if attributes is None:
        attributes = dict()
    r = fapi.create_workspace(namespace, name, protected, attributes, api_url)
    fapi._check_response_code(r, 201)
    return Workspace(namespace, name, api_url)
Create a new FireCloud workspace. Returns: Workspace: A new FireCloud workspace Raises: FireCloudServerError: API call failed.
def _insert_new_layers(self, new_layers, start_node_id, end_node_id):
    """Insert ``new_layers`` after the node with ``start_node_id``.

    A copy of the end node becomes the entry point of the new chain,
    the layers are chained onto it, the last layer is wired back to the
    original end node, and the old start->end edge is redirected into
    the chain.
    """
    # Clone the end node so the new chain gets its own entry node.
    new_node_id = self._add_node(deepcopy(self.node_list[end_node_id]))
    temp_output_id = new_node_id
    # Chain every layer except the last onto the cloned node.
    for layer in new_layers[:-1]:
        temp_output_id = self.add_layer(layer, temp_output_id)
    # The last layer connects the chain back to the original end node.
    self._add_edge(new_layers[-1], temp_output_id, end_node_id)
    new_layers[-1].input = self.node_list[temp_output_id]
    new_layers[-1].output = self.node_list[end_node_id]
    # Re-route the original edge so traffic flows through the new chain.
    self._redirect_edge(start_node_id, end_node_id, new_node_id)
Insert the new_layers after the node with start_node_id.
def bleu_score(predictions, labels, **unused_kwargs):
    """Approximate BLEU score between labels and predictions.

    Word pieces are not glued and ids are not detokenized before
    scoring, so this is an approximation. Uses ngram order 4 with a
    brevity penalty; no beam search.

    Args:
        predictions: tensor of model logits.
        labels: tensor of gold output ids.

    Returns:
        Tuple of (bleu score tensor, constant 1.0 weight tensor).
    """
    # Greedy decode: take the argmax token id at every position.
    outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
    # Drop the trailing singleton dimensions of the model layout.
    outputs = tf.squeeze(outputs, axis=[-1, -2])
    labels = tf.squeeze(labels, axis=[-1, -2])
    # compute_bleu runs as a Python op and yields a float32 scalar.
    bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)
    return bleu, tf.constant(1.0)
BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: bleu: int, approx bleu score
def _corrupt(self, data, dpos): ws = list(self._BLK_BE.unpack_from(data, dpos)) for t in range(16, 80): tmp = ws[(t - 3) & 15] ^ ws[(t - 8) & 15] ^ ws[(t - 14) & 15] ^ ws[(t - 16) & 15] ws[t & 15] = ((tmp << 1) | (tmp >> (32 - 1))) & 0xFFFFFFFF self._BLK_LE.pack_into(data, dpos, *ws)
Corruption from SHA1 core.
def incident(self, name, owner=None, **kwargs):
    """Create an Incident threat-intelligence object.

    Args:
        name: The name of the Incident.
        owner: Optional owner for the Incident.
        **kwargs: Additional Incident attributes.

    Return:
        An Incident TI object.
    """
    return Incident(self.tcex, name, owner=owner, **kwargs)
Create the Incident TI object. Args: name: The name of the Incident. owner: Optional owner for the Incident. **kwargs: Additional Incident attributes. Return: An Incident TI object.
def open(self):
    """Open the sqlite database and bind the table accessors."""
    connection = sqlite3.connect(self._dbname)
    cursor = connection.cursor()
    self._connection = connection
    self._cursor = cursor
    self._session_info = SessionInfoTable(connection, cursor)
    self._reports = ReportsTable(connection, cursor)
open the database
def run(self):
    """Run the simulation until stepping reports completion."""
    self.init_run()
    if self.debug:
        self.dump("AfterInit: ")
    while True:
        if not self.step():
            break
Runs the simulation.
def fencekml(self, layername):
    """Set the named KML layer as the geofence and upload it.

    Strips surrounding quotes from ``layername``, finds the matching
    layer, seeds the fence with the polygon's centre point followed by
    its vertices, and sends the fence to the vehicle.
    """
    # Allow the layer name to be passed quoted on the command line.
    if layername.startswith('"') and layername.endswith('"'):
        layername = layername[1:-1]
    for layer in self.allayers:
        if layer.key == layername:
            self.fenceloader.clear()
            # A fence polygon needs at least 3 vertices.
            if len(layer.points) < 3:
                return
            self.fenceloader.target_system = self.target_system
            self.fenceloader.target_component = self.target_component
            bounds = mp_util.polygon_bounds(layer.points)
            (lat, lon, width, height) = bounds
            # The first fence point is the return location, placed at
            # the centre of the polygon's bounding box.
            center = (lat+width/2, lon+height/2)
            self.fenceloader.add_latlon(center[0], center[1])
            for lat, lon in layer.points:
                self.fenceloader.add_latlon(lat, lon)
            self.send_fence()
set a layer as the geofence
def override(self, obj):
    """Copy every exported plain field of ``obj`` onto this object."""
    for attr in obj.__class__.export_fields:
        value = getattr(obj, attr)
        setattr(self, attr, value)
Overrides the plain fields of the dashboard.
def _spec_to_globs(address_mapper, specs):
    """Return a PathGlobs matching the build files for the given Specs."""
    patterns = set()
    for spec in specs:
        for pattern in spec.make_glob_patterns(address_mapper):
            patterns.add(pattern)
    return PathGlobs(include=patterns,
                     exclude=address_mapper.build_ignore_patterns)
Given a Specs object, return a PathGlobs object for the build files that it matches.
def edit(self, state):
    """Edit the user's membership.

    :param str state: (required), the state the membership should be
        in. Only accepts ``"active"``.
    :returns: itself, or None when ``state`` is not ``"active"``.
    """
    if not state or state.lower() != 'active':
        return None
    payload = dumps({'state': state.lower()})
    json = self._json(self._patch(self._api, data=payload))
    self._update_attributes(json)
    return self
Edit the user's membership. :param str state: (required), the state the membership should be in. Only accepts ``"active"``. :returns: itself
def open_netcdf_writer(self, flatten=False, isolate=False, timeaxis=1):
    """Prepare a new NetCDF interface object for writing data."""
    self._netcdf_writer = netcdftools.NetCDFInterface(
        flatten=bool(flatten),
        isolate=bool(isolate),
        timeaxis=int(timeaxis),
    )
Prepare a new |NetCDFInterface| object for writing data.
def vmotion_disable(host, username, password, protocol=None, port=None,
                    host_names=None):
    """Disable vMotion for a host or list of ESXi host names.

    ``host``/``username``/``password`` identify the ESXi host or
    vCenter Server; ``protocol`` and ``port`` override the https/443
    defaults. ``host_names`` selects specific ESXi hosts when
    connecting through a vCenter Server.

    Returns a dict mapping each host name to its result; on failure an
    entry carries an ``Error`` message and ``'VMotion Disabled': False``.
    """
    service_instance = salt.utils.vmware.get_service_instance(host=host,
                                                              username=username,
                                                              password=password,
                                                              protocol=protocol,
                                                              port=port)
    host_names = _check_hosts(service_instance, host, host_names)
    ret = {}
    for host_name in host_names:
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        vmotion_system = host_ref.configManager.vmotionSystem
        try:
            # Deselecting the VMotion vnic disables vMotion on the host.
            vmotion_system.DeselectVnic()
        except vim.fault.HostConfigFault as err:
            msg = 'vsphere.vmotion_disable failed: {0}'.format(err)
            log.debug(msg)
            ret.update({host_name: {'Error': msg,
                                    'VMotion Disabled': False}})
            continue
        ret.update({host_name: {'VMotion Disabled': True}})
    return ret
Disable vMotion for a given host or list of host_names. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. host_names List of ESXi host names. When the host, username, and password credentials are provided for a vCenter Server, the host_names argument is required to tell vCenter which hosts should disable VMotion. If host_names is not provided, VMotion will be disabled for the ``host`` location instead. This is useful for when service instance connection information is used for a single ESXi host. CLI Example: .. code-block:: bash # Used for single ESXi host connection information salt '*' vsphere.vmotion_disable my.esxi.host root bad-password # Used for connecting to a vCenter Server salt '*' vsphere.vmotion_disable my.vcenter.location root bad-password \ host_names='[esxi-1.host.com, esxi-2.host.com]'
def _parse_coroutine(self):
    """Telnet parser state machine.

    Every 'yield' expression returns the next input byte. Plain data
    bytes go to ``received_data``; IAC-prefixed sequences are decoded
    into commands, option negotiations and subnegotiations.
    """
    while True:
        d = yield
        if d == int2byte(0):
            # NUL bytes are ignored.
            pass
        elif d == IAC:
            # Interpret-As-Command: the next byte decides the action.
            d2 = yield
            if d2 == IAC:
                # Escaped 0xFF: a literal data byte.
                self.received_data(d2)
            elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
                # Single-byte commands carry no argument.
                self.command_received(d2, None)
            elif d2 in (DO, DONT, WILL, WONT):
                # Option negotiation: one option byte follows.
                d3 = yield
                self.command_received(d2, d3)
            elif d2 == SB:
                # Subnegotiation: collect bytes until IAC SE; an IAC not
                # followed by SE contributes the byte after it.
                data = []
                while True:
                    d3 = yield
                    if d3 == IAC:
                        d4 = yield
                        if d4 == SE:
                            break
                        else:
                            data.append(d4)
                    else:
                        data.append(d3)
                self.negotiate(b''.join(data))
        else:
            self.received_data(d)
Parser state machine. Every 'yield' expression returns the next byte.
def frmnam(frcode, lenout=_default_len_out):
    """Retrieve the name of a reference frame for a SPICE ID code.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/frmnam_c.html

    :param frcode: an integer code for a reference frame
    :param lenout: maximum length of the output string
    :return: the name associated with the reference frame
    :rtype: str
    """
    code_c = ctypes.c_int(frcode)
    lenout_c = ctypes.c_int(lenout)
    name_buf = stypes.stringToCharP(lenout_c)
    libspice.frmnam_c(code_c, lenout_c, name_buf)
    return stypes.toPythonString(name_buf)
Retrieve the name of a reference frame associated with a SPICE ID code. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/frmnam_c.html :param frcode: an integer code for a reference frame :type frcode: int :param lenout: Maximum length of output string. :type lenout: int :return: the name associated with the reference frame. :rtype: str
def rename_command(source, destination):
    """Executor for `globus rename`."""
    source_ep, source_path = source
    dest_ep, dest_path = destination
    if source_ep != dest_ep:
        # Rename never crosses endpoints; reject mismatched pairs.
        msg = ("rename requires that the source and dest "
               "endpoints are the same, {} != {}").format(source_ep, dest_ep)
        raise click.UsageError(msg)
    client = get_client()
    autoactivate(client, source_ep, if_expires_in=60)
    res = client.operation_rename(source_ep, oldpath=source_path,
                                  newpath=dest_path)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
Executor for `globus rename`
def label_set(self):
    """Return the labels matching ``class_set``, one per class."""
    labels = []
    for cls in self.class_set:
        first_sample = self.sample_ids_in_class(cls)[0]
        labels.append(self.labels[first_sample])
    return labels
Set of labels in the dataset corresponding to class_set.
def _format_object(obj, format_type=None):
    """Dispatch to `format_keys` or `format_field_names` per settings."""
    if json_api_settings.FORMAT_KEYS is None:
        return format_field_names(obj, format_type)
    return format_keys(obj, format_type)
Depending on settings calls either `format_keys` or `format_field_names`
def train_model(model_folder):
    """Train the model in ``model_folder``; return -1 if no command."""
    os.chdir(model_folder)
    training = generate_training_command(model_folder)
    if training is None:
        return -1
    logging.info(training)
    # Re-assert the working directory in case command generation moved it.
    os.chdir(model_folder)
    os.system(training)
Train the model in ``model_folder``.
def utime_delta(days=0, hours=0, minutes=0, seconds=0):
    """Return the time delta in microseconds.

    Note: call with keyword arguments only; positional use will make it
    much harder to add extra time ranges later.
    """
    total = days * DAY
    total += hours * HOUR
    total += minutes * MINUTE
    total += seconds * SECOND
    return total
Gets time delta in microseconds. Note: Do NOT use this function without keyword arguments. It will become much-much harder to add extra time ranges later if positional arguments are used.
def on_to_position(self, speed, position, brake=True, block=True):
    """Rotate the motor at ``speed`` to the absolute ``position``.

    ``speed`` can be a percentage or an ev3dev2 SpeedValue object.
    """
    native_speed = self._speed_native_units(speed)
    self.speed_sp = int(round(native_speed))
    self.position_sp = position
    self._set_brake(brake)
    self.run_to_abs_pos()
    if not block:
        return
    self.wait_until('running', timeout=WAIT_RUNNING_TIMEOUT)
    self.wait_until_not_moving()
Rotate the motor at ``speed`` to ``position`` ``speed`` can be a percentage or a :class:`ev3dev2.motor.SpeedValue` object, enabling use of other units.
async def reconnect(self):
    """Reconnect to the modem.

    Attempts a connection, then keeps retrying while the connection
    remains closed.
    """
    _LOGGER.debug('starting Connection.reconnect')
    await self._connect()
    # _closed stays True until a retry succeeds; keep trying until then.
    while self._closed:
        await self._retry_connection()
    _LOGGER.debug('ending Connection.reconnect')
Reconnect to the modem.
def get_cursor_vertical_diff(self):
    """Return how far down the cursor moved since the last render.

    Note:
        If another get_cursor_vertical_diff call is already in progress,
        immediately returns zero. (This situation is likely if called
        from a SIGWINCH signal handler, since sigwinches can happen in
        rapid succession and terminal emulators seem not to respond to
        cursor position queries before the next sigwinch occurs.)
    """
    if self.in_get_cursor_diff:
        # Re-entered (e.g. nested SIGWINCH): tell the outer call to
        # re-measure and bail out immediately.
        self.another_sigwinch = True
        return 0
    cursor_dy = 0
    while True:
        self.in_get_cursor_diff = True
        self.another_sigwinch = False
        cursor_dy += self._get_cursor_vertical_diff_once()
        self.in_get_cursor_diff = False
        # Repeat if a sigwinch interrupted the measurement above.
        if not self.another_sigwinch:
            return cursor_dy
Returns the how far down the cursor moved since last render. Note: If another get_cursor_vertical_diff call is already in progress, immediately returns zero. (This situation is likely if get_cursor_vertical_diff is called from a SIGWINCH signal handler, since sigwinches can happen in rapid succession and terminal emulators seem not to respond to cursor position queries before the next sigwinch occurs.)
def make_trajectory(first, filename, restart=False):
    """Factory for a Trajectory: append mode on restart, else write."""
    return Trajectory(first, filename, 'a' if restart else 'w')
Factory function to easily create a trajectory object
def read_passive_target(self, card_baud=PN532_MIFARE_ISO14443A,
                        timeout_sec=1):
    """Wait for a MiFare card and return its UID, or None on timeout.

    :raises RuntimeError: when multiple cards are detected or the UID
        is unexpectedly long.
    """
    response = self.call_function(PN532_COMMAND_INLISTPASSIVETARGET,
                                  params=[0x01, card_baud],
                                  response_length=17)
    if response is None:
        # No card found within the timeout.
        return None
    if response[0] != 0x01:
        raise RuntimeError('More than one card detected!')
    if response[5] > 7:
        raise RuntimeError('Found card with unexpectedly long UID!')
    uid_length = response[5]
    return response[6:6 + uid_length]
Wait for a MiFare card to be available and return its UID when found. Will wait up to timeout_sec seconds and return None if no card is found, otherwise a bytearray with the UID of the found card is returned.
def _get_dopants(substitutions, num_dopants, match_oxi_sign): n_type = [pred for pred in substitutions if pred['dopant_species'].oxi_state > pred['original_species'].oxi_state and (not match_oxi_sign or np.sign(pred['dopant_species'].oxi_state) == np.sign(pred['original_species'].oxi_state))] p_type = [pred for pred in substitutions if pred['dopant_species'].oxi_state < pred['original_species'].oxi_state and (not match_oxi_sign or np.sign(pred['dopant_species'].oxi_state) == np.sign(pred['original_species'].oxi_state))] return {'n_type': n_type[:num_dopants], 'p_type': p_type[:num_dopants]}
Utility method to get n- and p-type dopants from a list of substitutions.
def create_mapping(self, mapped_class, configuration=None):
    """Create a new mapping for the given mapped class and configuration.

    :param configuration: configuration for the new data element class,
        merged over this mapper's defaults.
    :type configuration: :class:`RepresenterConfiguration`
    :returns: newly created instance of :class:`Mapping`
    :raises ValueError: if the mapped class implements none of the
        member/collection/link resource interfaces.
    """
    cfg = self.__configuration.copy()
    # Fixed the `not configuration is None` anti-idiom.
    if configuration is not None:
        cfg.update(configuration)
    # Probe the interfaces without running the class's __init__.
    provided_ifcs = provided_by(object.__new__(mapped_class))
    if IMemberResource in provided_ifcs:
        base_data_element_class = self.member_data_element_base_class
    elif ICollectionResource in provided_ifcs:
        base_data_element_class = self.collection_data_element_base_class
    elif IResourceLink in provided_ifcs:
        base_data_element_class = self.linked_data_element_base_class
    else:
        raise ValueError('Mapped class for data element class does not '
                         'implement one of the required interfaces.')
    name = "%s%s" % (mapped_class.__name__,
                     base_data_element_class.__name__)
    de_cls = type(name, (base_data_element_class,), {})
    mp = self.mapping_class(self, mapped_class, de_cls, cfg)
    de_cls.mapping = mp
    return mp
Creates a new mapping for the given mapped class and representer configuration. :param configuration: configuration for the new data element class. :type configuration: :class:`RepresenterConfiguration` :returns: newly created instance of :class:`Mapping`
def QueryService(svc_name):
    """Query a Windows service and return its configuration.

    Args:
        svc_name: name of the Windows service to query.

    Returns:
        The service configuration from QueryServiceConfig, or None when
        opening the service fails before the query completes.
    """
    hscm = win32service.OpenSCManager(None, None,
                                      win32service.SC_MANAGER_ALL_ACCESS)
    result = None
    try:
        hs = win32serviceutil.SmartOpenService(hscm, svc_name,
                                               win32service.SERVICE_ALL_ACCESS)
        result = win32service.QueryServiceConfig(hs)
        win32service.CloseServiceHandle(hs)
    finally:
        # Always release the SCM handle, even if opening the service failed.
        win32service.CloseServiceHandle(hscm)
    return result
Query service and get its config.
def evaluate_forward(
    distribution,
    x_data,
    parameters=None,
    cache=None,
):
    """Evaluate forward Rosenblatt transformation (the CDF).

    Args:
        distribution (Dist): Distribution to evaluate.
        x_data (numpy.ndarray): Locations for where evaluate forward
            transformation at.
        parameters: Collection of parameters to override the default
            ones in the distribution.
        cache: A collection of previous calculations, in case the same
            distribution turns up on more than one occasion.

    Returns:
        The cumulative distribution values of ``distribution`` at
        location ``x_data`` using parameters ``parameters``.
    """
    assert len(x_data) == len(distribution), (
        "distribution %s is not of length %d" % (distribution, len(x_data)))
    assert hasattr(distribution, "_cdf"), (
        "distribution require the `_cdf` method to function.")
    cache = cache if cache is not None else {}
    parameters = load_parameters(
        distribution, "_cdf", parameters=parameters, cache=cache)
    # Record this evaluation so shared sub-distributions can reuse it.
    cache[distribution] = x_data
    out = numpy.zeros(x_data.shape)
    out[:] = distribution._cdf(x_data, **parameters)
    return out
Evaluate forward Rosenblatt transformation. Args: distribution (Dist): Distribution to evaluate. x_data (numpy.ndarray): Locations for where evaluate forward transformation at. parameters (:py:data:typing.Any): Collection of parameters to override the default ones in the distribution. cache (:py:data:typing.Any): A collection of previous calculations in case the same distribution turns up on more than one occasion. Returns: The cumulative distribution values of ``distribution`` at location ``x_data`` using parameters ``parameters``.
def _ready_gzip_fastq(in_files, data, require_bgzip=False):
    """Check if we have gzipped fastq needing no conversion or splitting.

    Avoid forcing bgzip when we don't need indexed files.
    """
    all_gzipped = all([not x or x.endswith(".gz") for x in in_files])
    if require_bgzip and all_gzipped:
        # Only count files as ready when they are bgzipped, not plain gzip.
        all_gzipped = all([not x or not _check_gzipped_input(x, data)[0]
                           for x in in_files])
    needs_convert = dd.get_quality_format(data).lower() == "illumina"
    needs_trim = dd.get_trim_ends(data)
    do_splitting = dd.get_align_split_size(data) is not False
    # Ready only when nothing downstream requires touching the files.
    return (all_gzipped and not needs_convert and not do_splitting and
            not objectstore.is_remote(in_files[0]) and not needs_trim and
            not get_downsample_params(data))
Check if we have gzipped fastq and don't need format conversion or splitting. Avoid forcing bgzip if we don't need indexed files.
def _start_dev_proc(self, device_os, device_config):
    """Spawn and return the worker process for one device OS."""
    log.info('Starting the child process for %s', device_os)
    worker = NapalmLogsDeviceProc(device_os, self.opts, device_config)
    proc = Process(target=worker.start)
    proc.start()
    proc.description = '%s device process' % device_os
    log.debug('Started process %s for %s, having PID %s',
              proc._name, device_os, proc.pid)
    return proc
Start the device worker process.
def soft_break(self, el, text):
    """Append a newline when ``el`` is a text-namespace paragraph."""
    if el.name != 'p':
        return
    if el.namespace and el.namespace == self.namespaces["text"]:
        text.append('\n')
Apply soft break if needed.
def add_size_scaled_points(
        self, longitude, latitude, data, shape='o', logplot=False,
        alpha=1.0, colour='b', smin=2.0, sscale=2.0, overlay=False):
    """Plot a set of points with marker size scaled according to the data.

    :param bool logplot: Scale according to the logarithm (base 10) of
        the data.
    :param float smin: Minimum scale size.
    :param float sscale: Scaling factor (exponent applied to the data).
    """
    if logplot:
        # Copy first so the caller's array is not modified.
        data = np.log10(data.copy())
    x, y, = self.m(longitude, latitude)
    # Marker size grows as smin + data ** sscale.
    self.m.scatter(x, y, marker=shape, s=(smin + data ** sscale), c=colour,
                   alpha=alpha, zorder=2)
    if not overlay:
        plt.show()
Plots a set of points with size scaled according to the data :param bool logplot: Choose to scale according to the logarithm (base 10) of the data :param float smin: Minimum scale size :param float sscale: Scaling factor
def get_agents(self, addr=True, agent_cls=None, as_coro=False):
    """Get agents from the managed environment.

    Proxy for the environment's ``get_agents``; the environment's
    manager agent (this agent) is excluded by design. ``as_coro`` is
    accepted for interface compatibility but not forwarded.
    """
    return self.env.get_agents(addr=addr, agent_cls=agent_cls)
Get agents from the managed environment. This is a managing function for the :py:meth:`~creamas.environment.Environment.get_agents`. Returned agent list excludes the environment's manager agent (this agent) by design.
def check_local() -> None:
    """Ensure the required replay directories exist.

    Creates ./replay, ./replay/toDo and ./replay/archive under the
    current working directory when missing. Uses ``exist_ok=True``
    instead of the original ``os.path.exists``/``os.makedirs`` pair,
    which had a check-then-create race.
    """
    for directory in ('./replay', './replay/toDo', './replay/archive'):
        os.makedirs(directory, exist_ok=True)
Verify required directories exist. This functions checks the current working directory to ensure that the required directories exist. If they do not exist, it will create them.
def to_utf8(value):
    """Return ``value`` encoded as a UTF-8 byte string.

    Python 2 only (uses the ``unicode`` builtin). This function comes
    from Tornado.

    :param value: A unicode or str to be encoded.
    :returns: The encoded string.
    :raises AssertionError: if ``value`` is neither unicode nor str.
    """
    if isinstance(value, unicode):
        return value.encode('utf-8')
    # Already a byte string; pass through unchanged.
    assert isinstance(value, str)
    return value
Returns a string encoded using UTF-8. This function comes from `Tornado`_. :param value: A unicode or string to be encoded. :returns: The encoded string.
def set_mypy_args(self, mypy_args=None):
    """Set MyPy arguments, defaulting --python-version to the target."""
    if mypy_args is None:
        self.mypy_args = None
        return
    self.mypy_errs = []
    self.mypy_args = list(mypy_args)
    has_version = any(arg.startswith("--python-version")
                      for arg in mypy_args)
    if not has_version:
        target = get_target_info_len2(self.comp.target, mode="nearest")
        self.mypy_args += [
            "--python-version",
            ".".join(str(v) for v in target),
        ]
    if logger.verbose:
        for arg in verbose_mypy_args:
            if arg not in self.mypy_args:
                self.mypy_args.append(arg)
    logger.log("MyPy args:", self.mypy_args)
Set MyPy arguments.
def final_spin_from_f0_tau(f0, tau, l=2, m=2):
    """Return the final spin based on the given frequency and damping time.

    .. note::
        Currently, only l = m = 2 is supported. Any other indices will
        raise a ``KeyError``.

    Parameters
    ----------
    f0 : float or array
        Frequency of the QNM (in Hz).
    tau : float or array
        Damping time of the QNM (in seconds).
    l : int, optional
        l-index of the harmonic. Default is 2.
    m : int, optional
        m-index of the harmonic. Default is 2.

    Returns
    -------
    float or array
        The spin of the final black hole. Unphysical frequency/damping
        combinations yield ``numpy.nan``.
    """
    f0, tau, input_is_array = ensurearray(f0, tau)
    # Fit coefficients for the (l, m) mode.
    a, b, c = _berti_spin_constants[l,m]
    origshape = f0.shape
    # Flatten so scalar and array inputs share one code path.
    f0 = f0.ravel()
    tau = tau.ravel()
    spins = numpy.zeros(f0.size)
    for ii in range(spins.size):
        # Quality factor of the ringdown.
        Q = f0[ii] * tau[ii] * numpy.pi
        try:
            # Inverting the Q(spin) fit.
            # NOTE(review): on Python 3 a negative base with a fractional
            # exponent returns a complex number rather than raising
            # ValueError -- confirm this path still produces nan there.
            s = 1. - ((Q-a)/b)**(1./c)
        except ValueError:
            s = numpy.nan
        spins[ii] = s
    spins = spins.reshape(origshape)
    return formatreturn(spins, input_is_array)
Returns the final spin based on the given frequency and damping time. .. note:: Currently, only l = m = 2 is supported. Any other indices will raise a ``KeyError``. Parameters ---------- f0 : float or array Frequency of the QNM (in Hz). tau : float or array Damping time of the QNM (in seconds). l : int, optional l-index of the harmonic. Default is 2. m : int, optional m-index of the harmonic. Default is 2. Returns ------- float or array The spin of the final black hole. If the combination of frequency and damping times give an unphysical result, ``numpy.nan`` will be returned.
def create(self, set):
    """Create a new Set via the API and return the deserialized result."""
    endpoint = self.client.get_url('SET', 'POST', 'create')
    response = self.client.request('POST', endpoint, json=set._serialize())
    return set._deserialize(response.json(), self)
Creates a new Set.
def backward(self, out_grads=None, is_train=True):
    """Do backward pass to get the gradient of arguments.

    Parameters
    ----------
    out_grads : NDArray or list of NDArray or dict of str to NDArray, optional
        Gradient on the outputs to be propagated back. This parameter
        is only needed when bind is called on outputs that are not a
        loss function.
    is_train : bool, default True
        Whether this backward is for training or inference. Note that
        in rare cases you want to call backward with is_train=False to
        get gradient during inference.

    Raises
    ------
    TypeError: if any provided gradient is not an NDArray.
    """
    # Normalize out_grads to a list of NDArray in output order.
    if out_grads is None:
        out_grads = []
    elif isinstance(out_grads, NDArray):
        out_grads = [out_grads]
    elif isinstance(out_grads, dict):
        # Map dict entries onto the symbol's declared output order.
        out_grads = [out_grads[k] for k in self._symbol.list_outputs()]
    for obj in out_grads:
        if not isinstance(obj, NDArray):
            raise TypeError("inputs must be NDArray")
    ndarray = c_handle_array(out_grads)
    check_call(_LIB.MXExecutorBackwardEx(
        self.handle,
        mx_uint(len(out_grads)),
        ndarray,
        ctypes.c_int(is_train)))
Do backward pass to get the gradient of arguments. Parameters ---------- out_grads : NDArray or list of NDArray or dict of str to NDArray, optional Gradient on the outputs to be propagated back. This parameter is only needed when bind is called on outputs that are not a loss function. is_train : bool, default True Whether this backward is for training or inference. Note that in rare cases you want to call backward with is_train=False to get gradient during inference. Examples -------- >>> # Example for binding on loss function symbol, which gives the loss value of the model. >>> # Equivalently it gives the head gradient for backward pass. >>> # In this example the built-in SoftmaxOutput is used as loss function. >>> # MakeLoss can be used to define customized loss function symbol. >>> net = mx.sym.Variable('data') >>> net = mx.sym.FullyConnected(net, name='fc', num_hidden=6) >>> net = mx.sym.Activation(net, name='relu', act_type="relu") >>> net = mx.sym.SoftmaxOutput(net, name='softmax') >>> args = {'data': mx.nd.ones((1, 4)), 'fc_weight': mx.nd.ones((6, 4)), >>> 'fc_bias': mx.nd.array((1, 4, 4, 4, 5, 6)), 'softmax_label': mx.nd.ones((1))} >>> args_grad = {'fc_weight': mx.nd.zeros((6, 4)), 'fc_bias': mx.nd.zeros((6))} >>> texec = net.bind(ctx=mx.cpu(), args=args, args_grad=args_grad) >>> out = texec.forward(is_train=True)[0].copy() >>> print out.asnumpy() [[ 0.00378404 0.07600445 0.07600445 0.07600445 0.20660152 0.5616011 ]] >>> texec.backward() >>> print(texec.grad_arrays[1].asnumpy()) [[ 0.00378404 0.00378404 0.00378404 0.00378404] [-0.92399555 -0.92399555 -0.92399555 -0.92399555] [ 0.07600445 0.07600445 0.07600445 0.07600445] [ 0.07600445 0.07600445 0.07600445 0.07600445] [ 0.20660152 0.20660152 0.20660152 0.20660152] [ 0.5616011 0.5616011 0.5616011 0.5616011 ]] >>> >>> # Example for binding on non-loss function symbol. >>> # Here the binding symbol is neither built-in loss function >>> # nor customized loss created by MakeLoss. 
>>> # As a result the head gradient is not automatically provided. >>> a = mx.sym.Variable('a') >>> b = mx.sym.Variable('b') >>> # c is not a loss function symbol >>> c = 2 * a + b >>> args = {'a': mx.nd.array([1,2]), 'b':mx.nd.array([2,3])} >>> args_grad = {'a': mx.nd.zeros((2)), 'b': mx.nd.zeros((2))} >>> texec = c.bind(ctx=mx.cpu(), args=args, args_grad=args_grad) >>> out = texec.forward(is_train=True)[0].copy() >>> print(out.asnumpy()) [ 4. 7.] >>> # out_grads is the head gradient in backward pass. >>> # Here we define 'c' as loss function. >>> # Then 'out' is passed as head gradient of backward pass. >>> texec.backward(out) >>> print(texec.grad_arrays[0].asnumpy()) [ 8. 14.] >>> print(texec.grad_arrays[1].asnumpy()) [ 4. 7.]
def age(self):
    """Return the estimated age in years, or None when no date range is set.

    The exact date of birth is usually unknown, so the middle of the
    DOB date range is assumed to be the real birth date.
    """
    if self.date_range is None:
        return None
    dob = self.date_range.middle
    today = datetime.date.today()
    years = today.year - dob.year
    # Subtract one if this year's birthday has not happened yet.
    if (today.month, today.day) < (dob.month, dob.day):
        years -= 1
    return years
int, the estimated age of the person.

Note that a DOB object is based on a date range and the exact date is
usually unknown, so for age calculation the middle of the range is
assumed to be the real date of birth.
def wait_ready(self, name, timeout=5.0, sleep_interval=0.2):
    """Wait for a newly created bucket to become ready.

    :param string name: the bucket name to wait for
    :param float timeout: maximum number of seconds to wait
    :param float sleep_interval: seconds to sleep between probes
    :raise: :exc:`.CouchbaseError` on internal HTTP error
    :raise: :exc:`NotReadyError` if all nodes could not be ready in time
    """
    end = time() + timeout
    while True:
        try:
            info = self.bucket_info(name).value
            # Every node must report healthy before the bucket is usable.
            for node in info['nodes']:
                if node['status'] != 'healthy':
                    raise NotReadyError.pyexc('Not all nodes are healthy')
            return
        except E.CouchbaseError:
            # NOTE(review): NotReadyError is presumably a CouchbaseError
            # subclass so it is retried here too -- confirm.
            # Give up when the next probe would start past the deadline.
            if time() + sleep_interval > end:
                raise
            sleep(sleep_interval)
Wait for a newly created bucket to be ready.

:param string name: the name to wait for
:param seconds timeout: the maximum amount of time to wait
:param seconds sleep_interval: the number of seconds to sleep between
    each probe
:raise: :exc:`.CouchbaseError` on internal HTTP error
:raise: :exc:`NotReadyError` if all nodes could not be ready in time
def load(path=None, root=None, db=None, load_user=True):
    """Load and merge all of the config files (base, remotes, accounts)."""
    config = load_config(path, load_user=load_user)

    remotes = load_remotes(path, load_user=load_user)
    if remotes:
        if 'remotes' not in config:
            config.remotes = AttrDict()
        for key, value in remotes.remotes.items():
            config.remotes[key] = value

    accounts = load_accounts(path, load_user=load_user)
    if accounts:
        if 'accounts' not in config:
            config.accounts = AttrDict()
        for key, value in accounts.accounts.items():
            config.accounts[key] = value

    update_config(config)

    # Explicit arguments override whatever the files specified.
    if root:
        config.library.filesystem_root = root
    if db:
        config.library.database = db

    return config
Load all of the config files.
def uninstall_packages():
    """Uninstall host packages that are no longer in the desired set.

    Compares the package list recorded in server state with the currently
    desired packages and removes the difference, recording each removed
    package in ``env.uninstalled_packages[env.host]``.
    """
    p = server_state('packages_installed')
    if p:
        installed = set(p)
    else:
        # No recorded state: nothing was installed through us, nothing to do.
        return
    env.uninstalled_packages[env.host] = []
    packages = set(get_packages())
    # Packages previously installed but no longer wanted.
    uninstall = installed - packages
    if uninstall and env.verbosity:
        print env.host,'UNINSTALLING HOST PACKAGES'
    for p in uninstall:
        if env.verbosity:
            print ' - uninstalling',p
        uninstall_package(p)
        env.uninstalled_packages[env.host].append(p)
    # Refresh the recorded state to the now-current package set.
    set_server_state('packages_installed',get_packages())
    return
Uninstall unwanted packages
def _is_ctype(self, ctype): if not self.valid: return False mime = self.content_type return self.ContentMimetypes.get(mime) == ctype
Return True iff content is valid and of the given type.
def protocol_names(self):
    """Return the names of all registered protocols as strings."""
    return [str(proto.name) for proto in self.protocols()]
Returns all registered protocol names
def get_file(fname, datapath=datapath):
    """Return the local path of an example data file, downloading if needed.

    If `fname` is not present in the `datapath` directory, attempt to
    download it from the remote location `webloc`; `datapath` is created
    when missing.
    """
    datapath = pathlib.Path(datapath)
    datapath.mkdir(parents=True, exist_ok=True)
    dlfile = datapath / fname
    if not dlfile.exists():
        print("Attempting to download file {} from {} to {}.".
              format(fname, webloc, datapath))
        try:
            dl_file(url=webloc+fname, dest=dlfile)
        except BaseException:
            # Even KeyboardInterrupt/SystemExit land here; warn, re-raise.
            warnings.warn("Download failed: {}".format(fname))
            raise
    return dlfile
Return path of an example data file Return the full path to an example data file name. If the file does not exist in the `datapath` directory, tries to download it from the ODTbrain GitHub repository.
def set_lim(min, max, name):
    """Set the domain bounds of the scale associated with attribute *name*.

    Parameters
    ----------
    name: hashable
        Any variable that can be used as a key for a dictionary

    Raises
    ------
    KeyError
        When no context figure is associated with the provided key.
    """
    # `min`/`max` shadow the builtins but are part of the public signature.
    dimension = _get_attribute_dimension(name)
    scale = _context['scales'][dimension]
    scale.min = min
    scale.max = max
    return scale
Set the domain bounds of the scale associated with the provided key. Parameters ---------- name: hashable Any variable that can be used as a key for a dictionary Raises ------ KeyError When no context figure is associated with the provided key.
def _read_snc(snc_file):
    """Read a synchronization file and return sample stamps and times.

    Returns
    -------
    sampleStamp : ndarray of int
        Sample number from the start of the study.
    sampleTime : ndarray of datetime.datetime
        FILETIME representation of each sampleStamp, used to interpolate
        between sample stamps and wall-clock time.
    """
    # Each record is little-endian: (int32 sampleStamp, int64 sampleTime).
    snc_raw_dtype = dtype([('sampleStamp', '<i'), ('sampleTime', '<q')])
    with snc_file.open('rb') as f:
        f.seek(352)  # skip the fixed-size file header -- TODO confirm size
        snc_raw = fromfile(f, dtype=snc_raw_dtype)
    sampleStamp = snc_raw['sampleStamp']
    sampleTime = asarray([_filetime_to_dt(x)
                          for x in snc_raw['sampleTime']])
    return sampleStamp, sampleTime
Read Synchronization File and return sample stamp and time

Returns
-------
sampleStamp : list of int
    Sample number from start of study
sampleTime : list of datetime.datetime
    File time representation of sampleStamp

Notes
-----
The synchronization file is used to calculate a FILETIME given a sample
stamp (and vice versa). Theoretically, it is possible to calculate a
sample stamp's FILETIME given the FILETIME of sample stamp zero (when
sampling started) and the sample rate. However, because the sample rate
cannot be represented with full precision, the accuracy of the FILETIME
calculation is affected.

To compensate for the lack of accuracy, the synchronization file
maintains a sample stamp-to-computer time (called MasterTime) mapping.
Interpolation is then used to calculate a FILETIME given a sample stamp
(and vice versa).

The attributes, sampleStamp and sampleTime, are used to predict (using
interpolation) the FILETIME based upon a given sample stamp (and vice
versa). Currently, the only use for this conversion process is to enable
correlation of EEG (sample_stamp) data with other sources of data such
as Video (which works in FILETIME).
def make_client(instance):
    """Return a neutron client built from the given instance's settings.

    :param instance: a client-manager-like object carrying auth and
        session settings (username, password, token, endpoint, etc.).
    """
    neutron_client = utils.get_client_class(
        API_NAME,
        instance._api_version[API_NAME],
        API_VERSIONS,
    )
    instance.initialize()
    url = instance._url
    # Normalize the endpoint: no trailing slash.
    url = url.rstrip("/")
    client = neutron_client(username=instance._username,
                            project_name=instance._project_name,
                            password=instance._password,
                            region_name=instance._region_name,
                            auth_url=instance._auth_url,
                            endpoint_url=url,
                            endpoint_type=instance._endpoint_type,
                            token=instance._token,
                            auth_strategy=instance._auth_strategy,
                            insecure=instance._insecure,
                            ca_cert=instance._ca_cert,
                            retries=instance._retries,
                            raise_errors=instance._raise_errors,
                            session=instance._session,
                            auth=instance._auth)
    return client
Returns a Neutron client.
def make_parent_dirs(path, mode=0o777):
    """Ensure the parent directories of *path* exist, creating as needed.

    Returns *path* unchanged so the call can be chained.
    """
    parent_dir = os.path.dirname(path)
    # A bare filename has no parent component; nothing to create.
    if parent_dir:
        make_all_dirs(parent_dir, mode)
    return path
Ensure parent directories of a file are created as needed.
def create_dir_rec(path: Path):
    """Create a folder recursively (no-op if it already exists).

    :param path: directory path to create
    :type path: ~pathlib.Path
    """
    # exist_ok=True already makes this safe when the directory exists,
    # so the previous exists() pre-check (a TOCTOU race) is unnecessary.
    path.mkdir(parents=True, exist_ok=True)
Create a folder recursively.

:param path: path
:type path: ~pathlib.Path
def main():
    """Send a Slack notification to a configured channel.

    Parses CLI arguments and posts a deployment message, but only when
    the environment name contains "prod".
    """
    logging.basicConfig(format=LOGGING_FORMAT)
    log = logging.getLogger(__name__)
    parser = argparse.ArgumentParser()
    add_debug(parser)
    add_app(parser)
    add_env(parser)
    add_properties(parser)
    args = parser.parse_args()
    # Raise the root package logger to the requested verbosity.
    logging.getLogger(__package__.split(".")[0]).setLevel(args.debug)
    log.debug('Parsed arguements: %s', args)
    if "prod" not in args.env:
        log.info('No slack message sent, not a production environment')
    else:
        log.info("Sending slack message, production environment")
        slacknotify = SlackNotification(app=args.app, env=args.env,
                                        prop_path=args.properties)
        slacknotify.post_message()
Send Slack notification to a configured channel.
def merge_rdf_list(rdf_list):
    """Merge an RDF collection (rdf:first / rdf:rest chain) into a python list.

    args:
        rdf_list: the RdfDataset object with the list values

    returns:
        list of values, in list order

    Bug fix: the previous implementation reversed the accumulated list at
    every recursion level, scrambling the order for lists of three or
    more items. Building first-then-rest preserves the order directly.
    """
    if isinstance(rdf_list, list):
        rdf_list = rdf_list[0]
    rtn_list = []
    item = rdf_list
    if item.get('rdf_first'):
        rtn_list += item['rdf_first']
    # Recurse down the rest-chain until the rdf:nil terminator.
    if item.get('rdf_rest') and item.get('rdf_rest', [1])[0] != 'rdf_nil':
        rtn_list += merge_rdf_list(item['rdf_rest'][0])
    return rtn_list
takes an rdf list and merges it into a python list args: rdf_list: the RdfDataset object with the list values returns: list of values
def inverse_transform(self, Y, columns=None):
    """Transform `Y` back to the ambient data space defined by `self.data`.

    Parameters
    ----------
    Y : array-like, shape=[n_samples_y, n_pca]
        Data in the same reduced space as `self.data_nu`.
    columns : list-like, optional
        Column indices in the original data space to return; avoids
        computing the full inverse when only a few dimensions are needed.

    Returns
    -------
    Inverse transformed data, shape=[n_samples_y, n_features]

    Raises
    ------
    ValueError : if Y.shape[1] != self.data_nu.shape[1]
    """
    try:
        if not hasattr(self, "data_pca"):
            # No PCA was applied: `Y` is already ambient; only validate
            # dimensionality and optionally subset columns.
            try:
                if Y.shape[1] != self.data_nu.shape[1]:
                    raise ValueError
            except IndexError:
                # 1-D input cannot match the expected 2-D shape.
                raise ValueError
            if columns is None:
                return Y
            else:
                columns = np.array([columns]).flatten()
                return Y[:, columns]
        else:
            if columns is None:
                return self.data_pca.inverse_transform(Y)
            else:
                # Partial inverse: project onto the requested columns of
                # the PCA components, re-adding the mean when centered.
                columns = np.array([columns]).flatten()
                Y_inv = np.dot(Y, self.data_pca.components_[:, columns])
                if hasattr(self.data_pca, "mean_"):
                    Y_inv += self.data_pca.mean_[columns]
                return Y_inv
    except ValueError:
        raise ValueError("data of shape {} cannot be inverse transformed"
                         " from graph built on data of shape {}".format(
                             Y.shape, self.data_nu.shape))
Transform input data `Y` to ambient data space defined by `self.data` Takes data in the same reduced space as `self.data_nu` and transforms it to be in the same ambient space as `self.data`. Parameters ---------- Y : array-like, shape=[n_samples_y, n_pca] n_features must be the same as `self.data_nu`. columns : list-like list of integers referring to column indices in the original data space to be returned. Avoids recomputing the full matrix where only a few dimensions of the ambient space are of interest Returns ------- Inverse transformed data, shape=[n_samples_y, n_features] Raises ------ ValueError : if Y.shape[1] != self.data_nu.shape[1]
def encode_numpy(array):
    """Encode a numpy array for JSON serialization.

    :return: a dict with the fields:

        - *data*: base64-encoded array buffer (utf8 string)
        - *type*: the dtype name
        - *shape*: the array shape
    """
    payload = base64.b64encode(array.data).decode('utf8')
    return {
        'data': payload,
        'type': array.dtype.name,
        'shape': array.shape,
    }
Encode a numpy array as a base64 encoded string, to be JSON serialized. :return: a dictionary containing the fields: - *data*: the base64 string - *type*: the array type - *shape*: the array shape
def frameify(self, state, data):
    """Yield any buffered partial data plus *data* as a single frame.

    ``state.recv_buf`` is prepended to the incoming data and then cleared
    unconditionally. A ``FrameSwitch`` raised by the consumer is
    swallowed -- presumably it signals a mid-stream change of framing
    strategy; confirm against the caller.
    """
    try:
        yield state.recv_buf + data
    except FrameSwitch:
        pass
    finally:
        # Buffer is consumed (or abandoned) either way.
        state.recv_buf = ''
Yield the data as a single frame.
def from_dict(cls, d):
    """Construct an AdfTask object from its MSONable JSON dict.

    Parameters
    ----------
    d : dict
        A dict of saved attributes.

    Returns
    -------
    task : AdfTask
        An AdfTask object recovered from the JSON dict ``d``.
    """
    def _from_dict(_d):
        # Sub-entries are optional; a missing key stays None.
        return AdfKey.from_dict(_d) if _d is not None else None

    operation = d.get("operation")
    title = d.get("title")
    basis_set = _from_dict(d.get("basis_set"))
    xc = _from_dict(d.get("xc"))
    units = _from_dict(d.get("units"))
    scf = _from_dict(d.get("scf"))
    others = [AdfKey.from_dict(o) for o in d.get("others", [])]
    geo = _from_dict(d.get("geo"))
    # Guard against a missing "geo" entry, which previously raised
    # AttributeError when accessing geo.subkeys on None.
    geo_subkeys = geo.subkeys if geo is not None else None
    return cls(operation, basis_set, xc, title, units, geo_subkeys,
               scf, others)
Construct a MSONable AdfTask object from the JSON dict. Parameters ---------- d : dict A dict of saved attributes. Returns ------- task : AdfTask An AdfTask object recovered from the JSON dict ``d``.
def _compile(self, source, filename):
    """Compile a template, stashing the rendered source in the linecache.

    Overrides jinja2's compilation so that tracebacks through compiled
    templates can display the rendered source for debugging.
    """
    if filename == '<template>':
        # Anonymous templates get a unique pseudo-filename so their
        # source can be keyed in the linecache.
        filename = 'dbt-{}'.format(
            codecs.encode(os.urandom(12), 'hex').decode('ascii')
        )
        filename = jinja2._compat.encode_filename(filename)
        # linecache entry format: (size, mtime, lines, fullname).
        linecache.cache[filename] = (
            len(source),
            None,
            [line + '\n' for line in source.splitlines()],
            filename
        )
    return super(MacroFuzzEnvironment, self)._compile(source, filename)
Override jinja's compilation to stash the rendered source inside the python linecache for debugging.
def _get_opus_maximum(self):
    """Fetch or create the "opus maximum" type instance in the store."""
    # NOTE(review): `label` and `opmax` are bound to the same resource
    # object, so Literal(label, "en") uses the resource itself as the
    # label value. Looks like `label` was meant to be a plain string
    # (e.g. "opus maximum") -- confirm against the data.
    label = opmax = self.session.get_resource(
        BASE_URI_TYPES % "opmax",
        self.session.get_class(surf.ns.ECRM['E55_Type'])
    )
    if opmax.is_present():
        return opmax
    else:
        opmax.rdfs_label.append(Literal(label, "en"))
        logger.debug("Created a new opus maximum type instance")
        opmax.save()
        return opmax
Instantiate an opus maximum type.
def wsgi_proxyfix(factory=None):
    """Fix ``REMOTE_ADDR`` based on ``X-Forwarded-For`` headers.

    .. note::

       You must set ``WSGI_PROXIES`` to the correct number of proxies,
       otherwise your application is susceptible to malicious attacks.
    """
    def create_wsgi(app, **kwargs):
        if factory:
            wsgi_app = factory(app, **kwargs)
        else:
            wsgi_app = app.wsgi_app
        num_proxies = app.config.get('WSGI_PROXIES')
        if num_proxies:
            return ProxyFix(wsgi_app, num_proxies=num_proxies)
        return wsgi_app
    return create_wsgi
Fix ``REMOTE_ADDR`` based on ``X-Forwarded-For`` headers. .. note:: You must set ``WSGI_PROXIES`` to the correct number of proxies, otherwise you application is susceptible to malicious attacks. .. versionadded:: 1.0.0
def fit(self, train_set, test_set):
    """Fit the model to the given data.

    :param train_set: training data
    :param test_set: test data
    """
    with tf.Graph().as_default(), tf.Session() as self.tf_session:
        self.build_model()
        tf.global_variables_initializer().run()
        # Keep the learning rate constant for the first third of
        # training, then decay it exponentially per epoch.
        third = self.num_epochs // 3
        for i in range(self.num_epochs):
            lr_decay = self.lr_decay ** max(i - third, 0.0)
            self.tf_session.run(
                tf.assign(self.lr_var,
                          tf.multiply(self.learning_rate, lr_decay)))
            train_perplexity = self._run_train_step(train_set, 'train')
            print("Epoch: %d Train Perplexity: %.3f"
                  % (i + 1, train_perplexity))
            test_perplexity = self._run_train_step(test_set, 'test')
            print("Test Perplexity: %.3f" % test_perplexity)
Fit the model to the given data. :param train_set: training data :param test_set: test data
def write(self, filename):
    """Write the multitrack pianoroll to a MIDI file.

    Parameters
    ----------
    filename : str
        Target MIDI filename; '.mid' is appended when the name carries
        no recognized MIDI extension.
    """
    midi_extensions = ('.mid', '.midi', '.MID', '.MIDI')
    if not filename.endswith(midi_extensions):
        filename += '.mid'
    self.to_pretty_midi().write(filename)
Write the multitrack pianoroll to a MIDI file. Parameters ---------- filename : str The name of the MIDI file to which the multitrack pianoroll is written.
def _run_parallel_multiprocess(self):
    """Run queued processes in parallel, one worker per core."""
    _log.debug("run.parallel.multiprocess.start")
    processes = []
    # Workers reach back to this runner through the class-level reference.
    ProcRunner.instance = self
    for i in range(self._ncores):
        self._status.running(i)
        proc = multiprocessing.Process(target=ProcRunner.run, args=(i,))
        proc.start()
        processes.append(proc)
    # Join in launch order; record per-worker success/failure by exitcode.
    for i in range(self._ncores):
        processes[i].join()
        code = processes[i].exitcode
        self._status.success(i) if 0 == code else self._status.fail(i)
    _log.debug("run.parallel.multiprocess.end states={}".format(self._status))
Run processes from queue
def google_storage_url(self, sat):
    """Return a Google Storage URL containing the given scene.

    :param sat: an object created by the scene_interpreter method
    :type sat: dict
    :returns: (String) the URL to a Google Storage file
    """
    scene_file = '{0}.tar.bz'.format(sat['scene'])
    parts = [self.google, sat['sat'], sat['path'], sat['row'], scene_file]
    return url_builder(parts)
Returns a Google Storage URL that contains the provided scene.

:param sat: Expects an object created by scene_interpreter method
:type sat: dict
:returns: (String) The URL to a Google Storage file
def stringify(data):
    """Given an iterable, return its items as a list with any non-string
    items converted to unicode strings.
    """
    def _to_text(element):
        if six.PY2 and isinstance(element, str):
            # Py2 bytestrings are decoded rather than repr'd.
            return salt.utils.stringutils.to_unicode(element)
        if not isinstance(element, six.string_types):
            return six.text_type(element)
        return element

    return [_to_text(element) for element in data]
Given an iterable, returns its items as a list, with any non-string items converted to unicode strings.
def get_eventhub_host(self):
    """Return the host part of the publish gRPC endpoint for ingestion.

    Returns None when no gRPC protocol entry is configured.
    """
    for proto in self.service.settings.data['publish']['protocol_details']:
        if proto['protocol'] != 'grpc':
            continue
        uri = proto['uri']
        # Keep everything before the first ':' (strip the port).
        return uri[0:uri.index(':')]
returns the publish grpc endpoint for ingestion.
def edge_cost(self, node_a, node_b):
    """Return the cost of the edge connecting node_a to node_b.

    Returns +inf if no such edge exists.
    """
    target = (node_a, node_b)
    node = self.get_node(node_a)
    for edge_id in node['edges']:
        edge = self.get_edge(edge_id)
        if edge['vertices'] == target:
            return edge['cost']
    return float('inf')
Returns the cost of moving between the edge that connects node_a to node_b. Returns +inf if no such edge exists.
def get_default_property_values(self, classname):
    """Return a dict of default values for all properties of the class."""
    schema_element = self.get_element_by_class_name(classname)
    defaults = {
        name: descriptor.default
        for name, descriptor in six.iteritems(schema_element.properties)
    }
    if schema_element.is_edge:
        # Edge endpoints are managed by the graph engine; they are not
        # user-settable defaults.
        for endpoint in (EDGE_SOURCE_PROPERTY_NAME,
                         EDGE_DESTINATION_PROPERTY_NAME):
            defaults.pop(endpoint, None)
    return defaults
Return a dict with default values for all properties declared on this class.
def after_loop(self, coro):
    """Register a coroutine to be called after the loop finishes running.

    Can also be used as a decorator.

    Parameters
    ------------
    coro: :term:`py:awaitable`
        The coroutine to register after the loop finishes.

    Raises
    -------
    TypeError
        The function was not a coroutine.
    """
    acceptable = (inspect.iscoroutinefunction(coro)
                  or inspect.isawaitable(coro))
    if not acceptable:
        raise TypeError('Expected coroutine or awaitable, received {0.__name__!r}.'.format(type(coro)))
    self._after_loop = coro
A function that also acts as a decorator to register a coroutine to be called after the loop finished running. Parameters ------------ coro: :term:`py:awaitable` The coroutine to register after the loop finishes. Raises ------- TypeError The function was not a coroutine.
def add_serviceListener(self, type, listener):
    """Add a listener for a particular service type.

    The listener's update_record method will then be called when
    information arrives for that type. Any previous registration of the
    same listener is removed first to avoid duplicate notifications.
    """
    self.remove_service_listener(listener)
    self.browsers.append(ServiceBrowser(self, type, listener))
Adds a listener for a particular service type. This object will then have its update_record method called when information arrives for that type.
def getRequiredNodes(self):
    """Return a dict mapping node shape to the number of nodes required
    to run the packed jobs.
    """
    required = {}
    for nodeShape in self.nodeShapes:
        required[nodeShape] = len(self.nodeReservations[nodeShape])
    return required
Returns a dict from node shape to number of nodes required to run the packed jobs.
def get_status(self):
    """Get the status of the report, aggregating its sub-reports.

    The first errored or failed sub-report demotes this report's status
    (propagating that sub-report's reason).

    :rtype: str
    :return: report status ('passed', 'failed' or 'error')
    """
    status = self.get('status')
    if status == Report.PASSED:
        for sr_name in self._sub_reports:
            sr = self._sub_reports[sr_name]
            sr_status = sr.get_status()
            reason = sr.get('reason')
            if sr_status == Report.ERROR:
                self.error(reason)
                break
            if sr_status == Report.FAILED:
                self.failed(reason)
                break
        # error()/failed() above mutate our own status; re-read it.
        status = self.get('status')
    return status
Get the status of the report and its sub-reports. :rtype: str :return: report status ('passed', 'failed' or 'error')
def _lease_owned(self, lease, current_uuid_path): prev_uuid_path, prev_uuid = lease.metadata with open(current_uuid_path) as f: current_uuid = f.read() return \ current_uuid_path == prev_uuid_path and \ prev_uuid == current_uuid
Checks if the given lease is owned by the prefix whose uuid is in the given path Note: The prefix must be also in the same path it was when it took the lease Args: path (str): Path to the lease current_uuid_path (str): Path to the uuid to check ownership of Returns: bool: ``True`` if the given lease in owned by the prefix, ``False`` otherwise
def build(obj: Any, *applicators: Callable[..., Any]) -> Any:
    """Run the provided object through the series of applicator functions.

    If ``obj`` is a BaseChain instance, the applicators run on a copy of
    the chain, leaving the provided chain instance unmutated.
    """
    steps = applicators
    if isinstance(obj, BaseChain):
        # Prepend a copy step so the caller's chain is never mutated.
        steps = (copy(),) + applicators
    return pipe(obj, *steps)
Run the provided object through the series of applicator functions. If ``obj`` is an instances of :class:`~eth.chains.base.BaseChain` the applicators will be run on a copy of the chain and thus will not mutate the provided chain instance.
def _has_sj_index(ref_file):
    """Return True when the STAR index metadata needed for on-the-fly
    splice junction indexing is present in *ref_file*.
    """
    needed = ("sjdbInfo.txt", "transcriptInfo.tab")
    return all(file_exists(os.path.join(ref_file, fname))
               for fname in needed)
this file won't exist if we can do on the fly splice junction indexing
def recover(self, requeue=False, cb=None):
    """Ask the server to redeliver all unacknowledged messages.

    :param requeue: passed through as the basic.recover requeue bit.
    :param cb: optional callback queued for the recover-ok response.
    """
    args = Writer()
    args.write_bit(requeue)
    self._recover_cb.append(cb)
    # Frame ids 60/110 -- presumably AMQP class basic, method recover;
    # confirm against the protocol constants used elsewhere.
    self.send_frame(MethodFrame(self.channel_id, 60, 110, args))
    self.channel.add_synchronous_cb(self._recv_recover_ok)
Ask server to redeliver all unacknowledged messages.