docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Check if ``jsondata`` has the structure of a keystore file version 3. Note that this test is not complete, e.g. it doesn't check key derivation or cipher parameters. Copied from https://github.com/vbuterin/pybitcointools Args: jsondata: Dictionary containing the data from the json file Return...
def check_keystore_json(jsondata: Dict) -> bool: if 'crypto' not in jsondata and 'Crypto' not in jsondata: return False if 'version' not in jsondata: return False if jsondata['version'] != 3: return False crypto = jsondata.get('crypto', jsondata.get('Crypto')) if 'ciphe...
181,202
Find the keystore file for an account, unlock it and get the private key Args: address: The Ethereum address for which to find the keyfile in the system password: Mostly for testing purposes. A password can be provided as the function argument here. If it's no...
def get_privkey(self, address: AddressHex, password: str) -> PrivateKey: address = add_0x_prefix(address).lower() if not self.address_in_keystore(address): raise ValueError('Keystore file not found for %s' % address) with open(self.accounts[address]) as data_file: ...
181,205
Load an account from a keystore file. Args: path: full path to the keyfile password: the password to decrypt the key file or `None` to leave it encrypted
def load(cls, path: str, password: str = None) -> 'Account': with open(path) as f: keystore = json.load(f) if not check_keystore_json(keystore): raise ValueError('Invalid keystore file') return Account(keystore, password, path=path)
181,207
Consume tokens. Args: tokens (float): number of transport tokens to consume Returns: wait_time (float): waiting time for the consumer
def consume(self, tokens): wait_time = 0. self.tokens -= tokens if self.tokens < 0: self._get_tokens() if self.tokens < 0: wait_time = -self.tokens / self.fill_rate return wait_time
181,301
Checks if the account has enough balance to handle the lifecycles of all open channels as well as the to be created channels. Note: This is just an estimation. Args: raiden: A raiden service instance channels_to_open: The number of new channels that should be opened Returns: T...
def has_enough_gas_reserve( raiden, channels_to_open: int = 0, ) -> Tuple[bool, int]: secure_reserve_estimate = get_reserve_estimate(raiden, channels_to_open) current_account_balance = raiden.chain.client.balance(raiden.chain.client.address) return secure_reserve_estimate <= current_ac...
181,307
Initializes a new `LogFilter` Args: config: Dictionary mapping module names to logging level default_level: The default logging level
def __init__(self, config: Dict[str, str], default_level: str): self._should_log: Dict[Tuple[str, str], bool] = {} # the empty module is not matched, so set it here self._default_level = config.get('', default_level) self._log_rules = [ (logger.split('.') if logger e...
181,331
Creates a new channel in the TokenNetwork contract. Args: partner: The peer to open the channel with. settle_timeout: The settle timeout to use for this channel. given_block_identifier: The block identifier of the state change that prompte...
def new_netting_channel( self, partner: Address, settle_timeout: int, given_block_identifier: BlockSpecification, ) -> ChannelID: checking_block = self.client.get_checking_block() self._new_channel_preconditions( partner=partner, ...
181,364
Install a new filter for an array of topics emitted by the contract. Args: topics: A list of event ids to filter for. Can also be None, in which case all events are queried. from_block: The block number at which to start looking for events. to_block: The ...
def events_filter( self, topics: List[str] = None, from_block: BlockSpecification = None, to_block: BlockSpecification = None, ) -> StatelessFilter: return self.client.new_filter( self.address, topics=topics, from_b...
181,386
Install a new filter for all the events emitted by the current token network contract Args: from_block: Create filter starting from this block number (default: 0). to_block: Create filter stopping at this block number (default: 'latest'). Return: The filter instance...
def all_events_filter( self, from_block: BlockSpecification = GENESIS_BLOCK_NUMBER, to_block: BlockSpecification = 'latest', ) -> StatelessFilter: return self.events_filter(None, from_block, to_block)
181,387
Register a token with the raiden token manager. Args: registry_address: registry address token_address_hex (string): a hex encoded token address. Returns: The token network proxy.
def register_token( self, registry_address_hex: typing.AddressHex, token_address_hex: typing.AddressHex, retry_timeout: typing.NetworkTimeout = DEFAULT_RETRY_TIMEOUT, ) -> TokenNetwork: registry_address = decode_hex(registry_address_hex) token...
181,413
Wait until a contract is mined Args: contract_address_hex (string): hex encoded address of the contract timeout (int): time to wait for the contract to get mined Returns: True if the contract got mined, false otherwise
def wait_for_contract(self, contract_address_hex, timeout=None): contract_address = decode_hex(contract_address_hex) start_time = time.time() result = self._raiden.chain.client.web3.eth.getCode( to_checksum_address(contract_address), ) current_time = time.ti...
181,415
Save events. Args: state_change_identifier: Id of the state change that generate these events. events: List of Event objects.
def write_events(self, events): with self.write_lock, self.conn: self.conn.executemany( 'INSERT INTO state_events(' ' identifier, source_statechange_id, log_time, data' ') VALUES(?, ?, ?, ?)', events, )
181,425
Delete state changes. Args: state_changes_to_delete: List of ids to delete.
def delete_state_changes(self, state_changes_to_delete: List[int]) -> None: with self.write_lock, self.conn: self.conn.executemany( 'DELETE FROM state_events WHERE identifier = ?', state_changes_to_delete, )
181,426
Save events. Args: state_change_identifier: Id of the state change that generate these events. events: List of Event objects.
def write_events(self, state_change_identifier, events, log_time): events_data = [ (None, state_change_identifier, log_time, self.serializer.serialize(event)) for event in events ] return super().write_events(events_data)
181,450
Open a port for the raiden service (listening at `internal_port`) through UPnP. Args: internal_port (int): the target port of the raiden service external_start_port (int): query for an external port starting here (default: internal_port) Returns: external_ip_address, ext...
def open_port(upnp, internal_port, external_start_port=None): if external_start_port is None: external_start_port = internal_port if upnp is None: return False def register(internal, external): # test existing mappings mapping = upnp.getspecificportmapping(external, 'U...
181,496
Try to release the port mapping for `external_port`. Args: external_port (int): the port that was previously forwarded to. Returns: success (boolean): if the release was successful.
def release_port(upnp, external_port): mapping = upnp.getspecificportmapping(external_port, 'UDP') if mapping is None: log.error('could not find a port mapping', external=external_port) return False else: log.debug('found existing port mapping', mapping=mapping) if upnp.de...
181,497
Calculate a blocktime estimate based on some past blocks. Args: oldest: delta in block numbers to go back. Return: average block time in seconds
def estimate_blocktime(self, oldest: int = 256) -> float: last_block_number = self.block_number() # around genesis block there is nothing to estimate if last_block_number < 1: return 15 # if there are less than `oldest` blocks available, start at block 1 if l...
181,502
Return a proxy for interacting with a smart contract. Args: contract_interface: The contract interface as defined by the json. address: The contract's address.
def new_contract_proxy(self, contract_interface, contract_address: Address): return ContractProxy( self, contract=self.new_contract(contract_interface, contract_address), )
181,550
Wait until the `transaction_hash` is applied or rejected. Args: transaction_hash: Transaction hash that we are waiting for.
def poll( self, transaction_hash: bytes, ): if len(transaction_hash) != 32: raise ValueError( 'transaction_hash must be a 32 byte hash', ) transaction_hash = encode_hex(transaction_hash) # used to check if the transac...
181,555
Helper function to unpack event data using a provided ABI Args: abi: The ABI of the contract, not the ABI of the event log_: The raw event data Returns: The decoded event
def decode_event(abi: ABI, log_: Dict) -> Dict: if isinstance(log_['topics'][0], str): log_['topics'][0] = decode_hex(log_['topics'][0]) elif isinstance(log_['topics'][0], int): log_['topics'][0] = decode_hex(hex(log_['topics'][0])) event_id = log_['topics'][0] events = filter_by_ty...
181,610
Returns events emmitted by a contract for a given event name, within a certain range. Args: web3: A Web3 instance contract_manager: A contract manager contract_address: The address of the contract to be filtered, can be `None` contract_name: The name of the contract topics: ...
def query_blockchain_events( web3: Web3, contract_manager: ContractManager, contract_address: Address, contract_name: str, topics: List, from_block: BlockNumber, to_block: BlockNumber, ) -> List[Dict]: filter_params = { 'fromBlock': from_block, ...
181,611
Returns the path with the highest `version` number. Raises: AssertionError: If any of the `paths` in the list is an invalid name. Args: paths: A list of file names.
def latest_db_file(paths: List[str]) -> Optional[str]: dbs = {} for db_path in paths: matches = VERSION_RE.match(os.path.basename(db_path)) assert matches, f'Invalid path name {db_path}' try: version = int(matches.group(1)) except ValueError: continu...
181,643
Returns a filtered list of `paths`, where every name matches our format. Args: paths: A list of file names.
def filter_db_names(paths: List[str]) -> List[str]: return [ db_path for db_path in paths if VERSION_RE.match(os.path.basename(db_path)) ]
181,644
Automatically maintain channels open for the given token network. Args: token_address: the ERC20 token network to connect to. funds: the amount of funds that can be used by the ConnectionMananger. initial_channel_target: number of channels to open proactively. jo...
def token_network_connect( self, registry_address: PaymentNetworkID, token_address: TokenAddress, funds: TokenAmount, initial_channel_target: int = 3, joinable_funds_target: float = 0.4, ) -> None: if not is_binary_address(regi...
181,725
After Raiden learns about a new block this function must be called to handle expiration of the hash time locks. Args: state: The current state. Return: TransitionResult: The resulting iteration
def handle_block( mediator_state: MediatorTransferState, state_change: Block, channelidentifiers_to_channels: ChannelMap, pseudo_random_generator: random.Random, ) -> TransitionResult[MediatorTransferState]: expired_locks_events = events_to_remove_expired_locks( mediator...
181,880
Keep listening for events forever. Args: timeout_ms: How long to poll the Home Server for before retrying. exception_handler: Optional exception handler function which can be used to handle exceptions in the caller thread. bad_sync_timeout: Base time to wait a...
def listen_forever( self, timeout_ms: int = 30000, exception_handler: Callable[[Exception], None] = None, bad_sync_timeout: int = 5, ): _bad_sync_timeout = bad_sync_timeout self.should_listen = True while self.should_listen: ...
182,225
Start a listener greenlet to listen for events in the background. Args: timeout_ms: How long to poll the Home Server for before retrying. exception_handler: Optional exception handler function which can be used to handle exceptions in the caller thread.
def start_listener_thread(self, timeout_ms: int = 30000, exception_handler: Callable = None): assert not self.should_listen and self.sync_thread is None, 'Already running' self.should_listen = True self.sync_thread = gevent.spawn(self.listen_forever, timeout_ms, exception_handler) ...
182,226
Search user directory for a given term, returning a list of users Args: term: term to be searched for Returns: user_list: list of users returned by server-side search
def search_user_directory(self, term: str) -> List[User]: response = self.api._send( 'POST', '/user_directory/search', { 'search_term': term, }, ) try: return [ User(self.api, _user['user_id'], _...
182,228
Send typing event directly to api Args: room: room to send typing event to timeout: timeout for the event, in ms
def typing(self, room: Room, timeout: int = 5000): path = f'/rooms/{quote(room.room_id)}/typing/{quote(self.user_id)}' return self.api._send('PUT', path, {'typing': True, 'timeout': timeout})
182,231
Initialize the state manager. Args: state_transition: function that can apply a StateChange message. current_state: current application state.
def __init__(self, state_transition: Callable, current_state: Optional[State]) -> None: if not callable(state_transition): raise ValueError('state_transition must be a callable') self.state_transition = state_transition self.current_state = current_state
182,304
Apply the `state_change` in the current machine and return the resulting events. Args: state_change: An object representation of a state change. Return: A list of events produced by the state transition. It's the upper layer's responsibility to d...
def dispatch(self, state_change: StateChange) -> List[Event]: assert isinstance(state_change, StateChange) # the state objects must be treated as immutable, so make a copy of the # current state and pass the copy to the state machine to be modified. next_state = deepcopy(self.c...
182,305
Sorts a list of servers by http round-trip time Params: servers: sequence of http server urls Returns: sequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed servers (possibly empty)
def sort_servers_closest(servers: Sequence[str]) -> Sequence[Tuple[str, float]]: if not {urlparse(url).scheme for url in servers}.issubset({'http', 'https'}): raise TransportError('Invalid server urls') get_rtt_jobs = set( gevent.spawn(lambda url: (url, get_http_rtt(url)), server_url) ...
182,408
Given a list of possible servers, chooses the closest available and create a GMatrixClient Params: servers: list of servers urls, with scheme (http or https) Rest of args and kwargs are forwarded to GMatrixClient constructor Returns: GMatrixClient instance for one of the available serve...
def make_client(servers: Sequence[str], *args, **kwargs) -> GMatrixClient: if len(servers) > 1: sorted_servers = [ server_url for (server_url, _) in sort_servers_closest(servers) ] log.info( 'Automatically selecting matrix homeserver based on RTT', ...
182,409
Sends a message to one of the global rooms These rooms aren't being listened on and therefore no reply could be heard, so these messages are sent in a send-and-forget async way. The actual room name is composed from the suffix given as parameter and chain name or id e.g.: raiden_ropsten...
def send_global(self, room: str, message: Message) -> None: self._global_send_queue.put((room, message)) self._global_send_event.set()
182,459
Helper function to unpack event data using a provided ABI Args: abi: The ABI of the contract, not the ABI of the event log: The raw event data Returns: The decoded event
def decode_event(abi: Dict, log: Dict): if isinstance(log['topics'][0], str): log['topics'][0] = decode_hex(log['topics'][0]) elif isinstance(log['topics'][0], int): log['topics'][0] = decode_hex(hex(log['topics'][0])) event_id = log['topics'][0] events = filter_by_type('event', abi...
182,717
Resample the dataset. Args: seed (int, optional): Seed for resampling. By default no seed is used.
def resample(self, seed=None): if seed is not None: gen = torch.manual_seed(seed) else: gen = torch.default_generator if self.replacement: self.perm = torch.LongTensor(len(self)).random_( len(self.dataset), generator=gen) else...
182,751
Outputs a function which will log the arguments to Visdom in an appropriate way. Args: vis_fn: A function, such as self.vis.image
def _viz_prototype(self, vis_fn): def _viz_logger(*args, **kwargs): self.win = vis_fn(*args, win=self.win, env=self.env, opts=self.opts, **kwargs) return _viz_logg...
182,794
Multiple lines can be added to the same plot with the "name" attribute (see example) Args: fields: Currently unused plot_type: {scatter, line} Examples: >>> scatter_logger = VisdomPlotLogger('line') >>> scatter_logger.log(stats['ep...
def __init__(self, plot_type, fields=None, win=None, env=None, opts={}, port=8097, server="localhost", name=None): super(VisdomPlotLogger, self).__init__(fields, win, env, opts, port, server) valid_plot_types = { "scatter": self.viz.scatter, "line": self.viz.line} ...
182,798
Update the results file with new information. Args: task_name (str): Name of the currently running task. A previously unseen ``task_name`` will create a new entry in both :attr:`tasks` and :attr:`results`. result: This will be appended to the list in :att...
def update(self, task_name, result): with open(self.filepath, 'rb') as f: existing_results = pickle.load(f) if task_name not in self.tasks: self._add_task(task_name) existing_results['tasks'].append(task_name) existing_results['results'].append([]...
182,821
Function to sample a new configuration This function is called inside Hyperband to query a new configuration Parameters: ----------- budget: float the budget for which this configuration is scheduled returns: config should return a valid configuration
def get_config(self, budget): self.logger.debug('start sampling a new configuration.') sample = None info_dict = {} # If no model is available, sample from prior # also mix in a fraction of random configs if len(self.kde_models.keys()) == 0 or np.random.rand() < self.random_fraction: sample ...
182,930
Function to sample a new configuration This function is called inside Hyperband to query a new configuration Parameters: ----------- budget: float the budget for which this configuration is scheduled returns: config should return a valid configuration
def get_config(self, budget): sample = None info_dict = {} # If no model is available, sample from prior # also mix in a fraction of random configs if len(self.kde_models.keys()) == 0 or np.random.rand() < self.random_fraction: sample = self.configspace.sample_configuration() info_dict['model_bas...
182,949
function to register finished runs Every time a run has finished, this function should be called to register it with the result logger. If overwritten, make sure to call this method from the base class to ensure proper logging. Parameters: ----------- job: hpbandster.distributed.dispatcher.Job o...
def new_result(self, job, update_model=True): super().new_result(job) if job.result is None: # One could skip crashed results, but we decided # assign a +inf loss and count them as bad configurations loss = np.inf else: loss = job.result["loss"] budget = job.kwargs["budget"] if budget not ...
182,950
Iteration class to resample new configurations along side keeping the good ones in SuccessiveHalving. Parameters: ----------- resampling_rate: float fraction of configurations that are resampled at each stage min_samples_advance:int number of samples that are guaranteed to proceed to the ...
def __init__(self, *args, resampling_rate = 0.5, min_samples_advance = 1, **kwargs): self.resampling_rate = resampling_rate self.min_samples_advance = min_samples_advance
182,955
BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration. See Li et al. (2016) for reference. Parameters: ----------- iteration: int the index of the iteration to be instantiated Returns: -------- SuccessiveHalving: the SuccessiveHalving iteration with the correspondin...
def get_next_iteration(self, iteration, iteration_kwargs={}): min_budget = max( self.min_budget, self.config_generator.largest_budget_with_model()) max_budget = self.max_budget eta = self.eta # precompute some HB stuff max_SH_iter = -int(np.log(min_budget/max_budget)/np.log(eta)) + 1 budgets = max_bu...
182,958
Function to sample a new configuration This function is called inside Hyperband to query a new configuration Parameters: ----------- budget: float the budget for which this configuration is scheduled returns: config should return a valid configuration
def get_config(self, budget): # No observations available for this budget sample from the prior if len(self.kde_models.keys()) == 0: return self.configspace.sample_configuration().get_dictionary() # If we haven't seen anything with this budget, we sample from the kde trained on the highest budget if budge...
182,960
function to register finished runs Every time a run has finished, this function should be called to register it with the result logger. If overwritten, make sure to call this method from the base class to ensure proper logging. Parameters: ----------- job_id: dict a dictionary containing all...
def new_result(self, job): super(KernelDensityEstimator, self).new_result(job) budget = job.kwargs["budget"] if budget not in self.configs.keys(): self.configs[budget] = [] self.losses[budget] = [] # We want to get a numerical representation of the configuration in the original space conf = ConfigS...
182,961
predict the loss of an unseen configuration Parameters: ----------- times: numpy array times where to predict the loss config: numpy array the numerical representation of the config Returns: -------- ...
def predict_unseen(self, times, config): assert np.all(times > 0) and np.all(times <= self.max_num_epochs) x = np.array(config)[None, :] idx = times / self.max_num_epochs x = np.repeat(x, idx.shape[0], axis=0) x = np.concatenate((x, idx[:, None]), axis=1) me...
182,982
function to sample a new configuration This function is called inside Hyperband to query a new configuration Parameters: ----------- budget: float the budget for which this configuration is scheduled returns: config should r...
def get_config(self, budget): self.lock.acquire() if not self.is_trained: c = self.config_space.sample_configuration().get_array() else: candidates = np.array([self.config_space.sample_configuration().get_array() for _ in range(...
183,046
starts a Pyro4 nameserver in a daemon thread Parameters: ----------- host: str the hostname to use for the nameserver port: int the port to be used. Default =0 means a random port nic_name: str name of the network interface to use Returns: -------- tuple (str, int): the host na...
def start_local_nameserver(host=None, port=0, nic_name=None): if host is None: if nic_name is None: host = 'localhost' else: host = nic_name_to_host(nic_name) uri, ns, _ = Pyro4.naming.startNS(host=host, port=port) host, port = ns.locationStr.split(':') thread = threading.Thread(target=ns.request...
183,048
Connect to the device. Args: banner: See protocol_handler.Connect. **kwargs: See protocol_handler.Connect and adb_commands.ConnectDevice for kwargs. Includes handle, rsa_keys, and auth_timeout_ms. Returns: An instance of this class if the device connected su...
def _Connect(self, banner=None, **kwargs): if not banner: banner = socket.gethostname().encode() conn_str = self.protocol_handler.Connect(self._handle, banner=banner, **kwargs) # Remove banner and colons after device state (state::banner) parts = conn_str.split(b'...
183,205
Removes a package from the device. Args: package_name: Package name of target package. keep_data: whether to keep the data and cache directories timeout_ms: Expected timeout for pushing and installing. Returns: The pm uninstall output.
def Uninstall(self, package_name, keep_data=False, timeout_ms=None): cmd = ['pm uninstall'] if keep_data: cmd.append('-k') cmd.append('"%s"' % package_name) return self.Shell(' '.join(cmd), timeout_ms=timeout_ms)
183,207
Return a directory listing of the given path. Args: device_path: Directory to list.
def List(self, device_path): connection = self.protocol_handler.Open(self._handle, destination=b'sync:') listing = self.filesync_handler.List(connection, device_path) connection.Close() return listing
183,211
Reboot the device. Args: destination: Specify 'bootloader' for fastboot.
def Reboot(self, destination=b''): self.protocol_handler.Open(self._handle, b'reboot:%s' % destination)
183,212
Run command on the device, returning the output. Args: command: Shell command to run timeout_ms: Maximum time to allow the command to run.
def Shell(self, command, timeout_ms=None): return self.protocol_handler.Command( self._handle, service=b'shell', command=command, timeout_ms=timeout_ms)
183,213
Run command on the device, yielding each line of output. Args: command: Command to run on the target. timeout_ms: Maximum time to allow the command to run. Yields: The responses from the shell command.
def StreamingShell(self, command, timeout_ms=None): return self.protocol_handler.StreamingCommand( self._handle, service=b'shell', command=command, timeout_ms=timeout_ms)
183,214
Initialize USB Handle. Arguments: device: libusb_device to connect to. setting: libusb setting with the correct endpoints to communicate with. usb_info: String describing the usb path/serial/device, for debugging. timeout_ms: Timeout in milliseconds for all I/O.
def __init__(self, device, setting, usb_info=None, timeout_ms=None): self._setting = setting self._device = device self._handle = None self._usb_info = usb_info or '' self._timeout_ms = timeout_ms if timeout_ms else DEFAULT_TIMEOUT_MS self._max_read_packet_len =...
183,220
Find and return the first matching device. Args: setting_matcher: See cls.FindDevices. device_matcher: See cls.FindDevices. **kwargs: See cls.FindDevices. Returns: An instance of UsbHandle. Raises: DeviceNotFoundError: Raised if the device is ...
def FindFirst(cls, setting_matcher, device_matcher=None, **kwargs): try: return next(cls.FindDevices( setting_matcher, device_matcher=device_matcher, **kwargs)) except StopIteration: raise usb_exceptions.DeviceNotFoundError( 'No device ava...
183,230
Initialize the TCP Handle. Arguments: serial: Android device serial of the form host or host:port. Host may be an IP address or a host name.
def __init__(self, serial, timeout_ms=None): # if necessary, convert serial to a unicode string if isinstance(serial, (bytes, bytearray)): serial = serial.decode('utf-8') if ':' in serial: self.host, self.port = serial.split(':') else: self.h...
183,232
Constructs a FastbootProtocol instance. Args: usb: UsbHandle instance. chunk_kb: Packet size. For older devices, 4 may be required.
def __init__(self, usb, chunk_kb=1024): self.usb = usb self.chunk_kb = chunk_kb
183,259
Sends a command to the device. Args: command: The command to send. arg: Optional argument to the command.
def SendCommand(self, command, arg=None): if arg is not None: if not isinstance(arg, bytes): arg = arg.encode('utf8') command = b'%s:%s' % (command, arg) self._Write(io.BytesIO(command), len(command))
183,260
Accepts normal responses from the device. Args: timeout_ms: Timeout in milliseconds to wait for each response. info_cb: Optional callback for text sent from the bootloader. Returns: OKAY packet's message.
def HandleSimpleResponses( self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK): return self._AcceptResponses(b'OKAY', info_cb, timeout_ms=timeout_ms)
183,261
Flashes a partition from the file on disk. Args: partition: Partition name to flash to. source_file: Filename to download to the device. source_len: Optional length of source_file, uses os.stat if not provided. info_cb: See Download. progress_callback: See Down...
def FlashFromFile(self, partition, source_file, source_len=0, info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None): if source_len == 0: # Fall back to stat. source_len = os.stat(source_file).st_size download_response = self.Download( ...
183,267
Flashes the last downloaded file to the given partition. Args: partition: Partition to overwrite with the new image. timeout_ms: Optional timeout in milliseconds to wait for it to finish. info_cb: See Download. Usually no messages. Returns: Response to a downloa...
def Flash(self, partition, timeout_ms=0, info_cb=DEFAULT_MESSAGE_CALLBACK): return self._SimpleCommand(b'flash', arg=partition, info_cb=info_cb, timeout_ms=timeout_ms)
183,269
Erases the given partition. Args: partition: Partition to clear.
def Erase(self, partition, timeout_ms=None): self._SimpleCommand(b'erase', arg=partition, timeout_ms=timeout_ms)
183,270
Returns the given variable's definition. Args: var: A variable the bootloader tracks. Use 'all' to get them all. info_cb: See Download. Usually no messages. Returns: Value of var according to the current bootloader.
def Getvar(self, var, info_cb=DEFAULT_MESSAGE_CALLBACK): return self._SimpleCommand(b'getvar', arg=var, info_cb=info_cb)
183,271
Executes an OEM command on the device. Args: command: Command to execute, such as 'poweroff' or 'bootconfig read'. timeout_ms: Optional timeout in milliseconds to wait for a response. info_cb: See Download. Messages vary based on command. Returns: The final resp...
def Oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK): if not isinstance(command, bytes): command = command.encode('utf8') return self._SimpleCommand( b'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb)
183,272
Reboots the device. Args: target_mode: Normal reboot when unspecified. Can specify other target modes such as 'recovery' or 'bootloader'. timeout_ms: Optional timeout in milliseconds to wait for a response. Returns: Usually the empty string. Depends ...
def Reboot(self, target_mode=b'', timeout_ms=None): return self._SimpleCommand( b'reboot', arg=target_mode or None, timeout_ms=timeout_ms)
183,273
Push a file-like object to the device. Args: connection: ADB connection datafile: File-like object for reading from filename: Filename to push to st_mode: stat mode for filename mtime: modification time progress_callback: callback method that accepts ...
def Push(cls, connection, datafile, filename, st_mode=DEFAULT_PUSH_MODE, mtime=0, progress_callback=None): fileinfo = ('{},{}'.format(filename, int(st_mode))).encode('utf-8') cnxn = FileSyncConnection(connection, b'<2I') cnxn.Send(b'SEND', fileinfo) if progress_c...
183,285
Send/buffer FileSync packets. Packets are buffered and only flushed when this connection is read from. All messages have a response from the device, so this will always get flushed. Args: command_id: Command to send. data: Optional data to send, must set data or size. ...
def Send(self, command_id, data=b'', size=0): if data: if not isinstance(data, bytes): data = data.encode('utf8') size = len(data) if not self._CanAddToSendBuffer(len(data)): self._Flush() buf = struct.pack(b'<2I', self.id_to_wire[com...
183,287
Prints a directory listing. Args: device_path: Directory to list.
def List(device, device_path): files = device.List(device_path) files.sort(key=lambda x: x.filename) maxname = max(len(f.filename) for f in files) maxsize = max(len(str(f.size)) for f in files) for f in files: mode = ( ('d' if stat.S_ISDIR(f.mode) else '-') + ...
183,294
Runs a command on the device and prints the stdout. Args: command: Command to run on the target.
def Shell(device, *command): if command: return device.StreamingShell(' '.join(command)) else: # Retrieve the initial terminal prompt to use as a delimiter for future reads terminal_prompt = device.InteractiveShell() print(terminal_prompt.decode('utf-8')) # Accept u...
183,295
Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], assuming uniform distribution of set sizes within the interval. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. cum_counts: the complete cummula...
def _compute_nfp_uniform(l, u, cum_counts, sizes): if l > u: raise ValueError("l must be less or equal to u") if l == 0: n = cum_counts[u] else: n = cum_counts[u]-cum_counts[l-1] return n * float(sizes[u] - sizes[l]) / float(2*sizes[u])
183,565
Computes the matrix of expected false positives for all possible sub-intervals of the complete domain of set sizes, assuming uniform distribution of set sizes within each sub-interval. Args: cum_counts: the complete cumulative distribution of set sizes. sizes: the complete domain of set s...
def _compute_nfps_uniform(cum_counts, sizes): nfps = np.zeros((len(sizes), len(sizes))) # All u an l are inclusive bounds for intervals. # Compute p = 1, the NFPs for l in range(len(sizes)): for u in range(l, len(sizes)): nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes...
183,566
Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], using the real set size distribution. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. counts: the complete distribution of set sizes. si...
def _compute_nfp_real(l, u, counts, sizes): if l > u: raise ValueError("l must be less or equal to u") return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1])
183,567
Computes the matrix of expected false positives for all possible sub-intervals of the complete domain of set sizes. Args: counts: the complete distribution of set sizes. sizes: the complete domain of set sizes. Return (np.array): the 2-D array of expected number of false positives ...
def _compute_nfps_real(counts, sizes): nfps = np.zeros((len(sizes), len(sizes))) # All u an l are inclusive bounds for intervals. # Compute p = 1, the NFPs for l in range(len(sizes)): for u in range(l, len(sizes)): nfps[l, u] = _compute_nfp_real(l, u, counts, sizes) return n...
183,568
Initialize the slots of the LeanMinHash. Args: seed (int): The random seed that controls the set of random permutation functions generated for this LeanMinHash. hashvalues: The hash values are the internal state of the LeanMinHash.
def _initialize_slots(self, seed, hashvalues):
    """Initialize the slots of the LeanMinHash.

    Args:
        seed (int): The random seed that controls the set of random
            permutation functions generated for this LeanMinHash.
        hashvalues: The hash values forming the internal state of the
            LeanMinHash.
    """
    self.seed = seed
    # NOTE(review): _parse_hashvalues is defined elsewhere; presumably it
    # normalizes hashvalues into the expected array form — confirm there
    # whether it depends on self.seed already being set.
    self.hashvalues = self._parse_hashvalues(hashvalues)
183,605
Compute the byte size after serialization. Args: byteorder (str, optional): This is byte order of the serialized data. Use one of the `byte order characters <https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_: ``@``, ``=``,...
def bytesize(self, byteorder='@'): # Use 8 bytes to store the seed integer seed_size = struct.calcsize(byteorder+'q') # Use 4 bytes to store the number of hash values length_size = struct.calcsize(byteorder+'i') # Use 4 bytes to store each hash value as we are using the ...
183,607
Estimate the `Jaccard similarity`_ (resemblance) between the sets represented by this MinHash and the other. Args: other (datasketch.MinHash): The other MinHash. Returns: float: The Jaccard similarity, which is between 0.0 and 1.0.
def jaccard(self, other): if other.seed != self.seed: raise ValueError("Cannot compute Jaccard given MinHash with\ different seeds") if len(self) != len(other): raise ValueError("Cannot compute Jaccard given MinHash with\ different...
183,616
Merge the other MinHash with this one, making this one the union of both. Args: other (datasketch.MinHash): The other MinHash.
def merge(self, other): if other.seed != self.seed: raise ValueError("Cannot merge MinHash with\ different seeds") if len(self) != len(other): raise ValueError("Cannot merge MinHash with\ different numbers of permutation functions"...
183,618
Create a MinHash which is the union of the MinHash objects passed as arguments. Args: *mhs: The MinHash objects to be united. The argument list length is variable, but must be at least 2. Returns: datasketch.MinHash: A new union MinHash.
def union(cls, *mhs): if len(mhs) < 2: raise ValueError("Cannot union less than 2 MinHash") num_perm = len(mhs[0]) seed = mhs[0].seed if any((seed != m.seed or num_perm != len(m)) for m in mhs): raise ValueError("The unioning MinHash must have the\ ...
183,621
Index all sets given their keys, MinHashes, and sizes. It can be called only once after the index is created. Args: entries (`iterable` of `tuple`): An iterable of tuples, each must be in the form of `(key, minhash, size)`, where `key` is the unique identifie...
def index(self, entries): if not self.is_empty(): raise ValueError("Cannot call index again on a non-empty index") if not isinstance(entries, list): queue = deque([]) for key, minhash, size in entries: if size <= 0: raise V...
183,627
Giving the MinHash and size of the query set, retrieve keys that references sets with containment with respect to the query set greater than the threshold. Args: minhash (datasketch.MinHash): The MinHash of the query set. size (int): The size (number of unique items) of ...
def query(self, minhash, size):
    """Given the MinHash and size of the query set, yield keys referencing
    sets whose containment with respect to the query set exceeds the
    threshold.

    Args:
        minhash (datasketch.MinHash): The MinHash of the query set.
        size (int): The size (number of unique items) of the query set.

    Yields:
        Keys of candidate sets.
    """
    for pos, sub_index in enumerate(self.indexes):
        upper = self.uppers[pos]
        # Sub-indexes without an upper bound hold no sets; skip them.
        if upper is None:
            continue
        b, r = self._get_optimal_param(upper, size)
        yield from sub_index[r]._query_b(minhash, b)
183,628
Estimate the `weighted Jaccard similarity`_ between the multi-sets represented by this weighted MinHash and the other. Args: other (datasketch.WeightedMinHash): The other weighted MinHash. Returns: float: The weighted Jaccard similarity between 0.0 and 1.0. ...
def jaccard(self, other): if other.seed != self.seed: raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\ different seeds") if len(self) != len(other): raise ValueError("Cannot compute Jaccard given WeightedMinHash objects wit...
183,632
Create a new weighted MinHash given a weighted Jaccard vector. Each dimension is an integer frequency of the corresponding element in the multi-set represented by the vector. Args: v (numpy.array): The Jaccard vector.
def minhash(self, v): if not isinstance(v, collections.Iterable): raise TypeError("Input vector must be an iterable") if not len(v) == self.dim: raise ValueError("Input dimension mismatch, expecting %d" % self.dim) if not isinstance(v, np.ndarray): v ...
183,634
Remove the key from the index. Args: key (hashable): The unique identifier of a set.
def remove(self, key): if self.prepickle: key = pickle.dumps(key) if key not in self.keys: raise ValueError("The given key does not exist") for H, hashtable in zip(self.keys[key], self.hashtables): hashtable.remove_val(H, key) if not hasht...
183,648
Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above) restricted to the list of keys given. Args: keys (hashable) : the keys for which to get the bucket allocation counts
def get_subset_counts(self, *keys): if self.prepickle: key_set = [pickle.dumps(key) for key in set(keys)] else: key_set = list(set(keys)) hashtables = [unordered_storage({'type': 'dict'}) for _ in range(self.b)] Hss = self.keys.getma...
183,650
Merge the other HyperLogLog with this one, making this the union of the two. Args: other (datasketch.HyperLogLog):
def merge(self, other):
    """Merge the other HyperLogLog into this one, making this the union
    of the two.

    Args:
        other (datasketch.HyperLogLog): the sketch to merge in.

    Raises:
        ValueError: if the two sketches have different precisions.
    """
    if self.m != other.m or self.p != other.p:
        raise ValueError("Cannot merge HyperLogLog with different\
                precisions.")
    # The union's register values are the element-wise maxima.
    self.reg = np.maximum(self.reg, other.reg)
183,667
Check equivalence between two HyperLogLogs Args: other (datasketch.HyperLogLog): Returns: bool: True if both have the same internal state.
def __eq__(self, other):
    """Check equivalence between two HyperLogLogs.

    Args:
        other (datasketch.HyperLogLog): the sketch to compare against.

    Returns:
        bool: True if both have the same type and internal state.
    """
    if type(self) is not type(other):
        return False
    # Same precision, same register count, and identical register contents.
    return self.p == other.p and self.m == other.m \
        and np.array_equal(self.reg, other.reg)
183,669
Add a unique key, together with a MinHash (or weighted MinHash) of the set referenced by the key. Note: The key won't be searchable until the :func:`datasketch.MinHashLSHForest.index` method is called. Args: key (hashable): The unique identifier of the set. ...
def add(self, key, minhash): if len(minhash) < self.k*self.l: raise ValueError("The num_perm of MinHash out of range") if key in self.keys: raise ValueError("The given key has already been added") self.keys[key] = [self._H(minhash.hashvalues[start:end]) ...
183,687
Return the approximate top-k keys that have the highest Jaccard similarities to the query set. Args: minhash (datasketch.MinHash): The MinHash of the query set. k (int): The maximum number of keys to return. Returns: `list` of at most k keys.
def query(self, minhash, k): if k <= 0: raise ValueError("k must be positive") if len(minhash) < self.k*self.l: raise ValueError("The num_perm of MinHash out of range") results = set() r = self.k while r > 0: for key in self._query(min...
183,690
Find matching expectations within _expectation_config. Args: expectation_type=None : The name of the expectation type to be matched. column=None : The name of the column to be matched. expectation_kwargs=None : A dictionary...
def find_expectation_indexes(self, expectation_type=None, column=None, expectation_kwargs=None ): if expectation_kwargs == None: expectation_kwargs = {} ...
183,849
Get an evaluation parameter value that has been stored in meta. Args: parameter_name (string): The name of the parameter to store. default_value (any): The default value to be returned if the parameter is not found. Returns: The current value of the evaluation param...
def get_evaluation_parameter(self, parameter_name, default_value=None): if "evaluation_parameters" in self._expectations_config and \ parameter_name in self._expectations_config['evaluation_parameters']: return self._expectations_config['evaluation_parameters'][parameter_nam...
183,856
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations. Args: parameter_name (string): The name of the kwarg to be replaced at evaluation time parameter_value (any): The value to be used
def set_evaluation_parameter(self, parameter_name, parameter_value): if 'evaluation_parameters' not in self._expectations_config: self._expectations_config['evaluation_parameters'] = {} self._expectations_config['evaluation_parameters'].update( {parameter_name: paramet...
183,857
Convenience method for creating weights from categorical data. Args: data (list-like): The data from which to construct the estimate. Returns: A new partition object:: { "partition": (list) The categorical values present in the data "weights": (list...
def categorical_partition_data(data): # Make dropna explicit (even though it defaults to true) series = pd.Series(data) value_counts = series.value_counts(dropna=True) # Compute weights using denominator only of nonnull values null_indexes = series.isnull() nonnull_count = (null_indexes =...
183,867
This function will take a dataset and add expectations that each column present exists. Args: inspect_dataset (great_expectations.dataset): The dataset to inspect and to which to add expectations.
def columns_exist(inspect_dataset): # Attempt to get column names. For pandas, columns is just a list of strings if not hasattr(inspect_dataset, "columns"): warnings.warn( "No columns list found in dataset; no autoinspection performed.") return elif isinstance(inspect_datas...
183,877
Helper function to convert a dict object to one that is serializable Args: test_obj: an object to attempt to convert a corresponding json-serializable object Returns: (dict) A converted test_object Warning: test_obj may also be converted in place.
def recursively_convert_to_json_serializable(test_obj): # Validate that all aruguments are of approved types, coerce if it's easy, else exception # print(type(test_obj), test_obj) # Note: Not 100% sure I've resolved this correctly... try: if not isinstance(test_obj, list) and np.isnan(test_...
183,902
Read a file using Pandas read_excel and return a great_expectations dataset. Args: filename (string): path to file to read dataset_class (Dataset class): class to which to convert resulting Pandas df expectations_config (string): path to great_expectations config file Returns: ...
def read_excel( filename, dataset_class=dataset.pandas_dataset.PandasDataset, expectations_config=None, autoinspect_func=None, *args, **kwargs ): df = pd.read_excel(filename, *args, **kwargs) if isinstance(df, dict): for key in df: df[key] = _convert_to_dataset_class...
183,913
Context manager for creating a unix-domain socket and listen for ffmpeg progress events. The socket filename is yielded from the context manager and the socket is closed when the context manager is exited. Args: handler: a function to be called when progress events are received; re...
def _watch_progress(handler): with _tmpdir_scope() as tmpdir: socket_filename = os.path.join(tmpdir, 'sock') sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) with contextlib.closing(sock): sock.bind(socket_filename) sock.listen(1) child = geve...
187,664
Change the PTS (presentation timestamp) of the input frames. Args: expr: The expression which is evaluated for each frame to construct its timestamp. Official documentation: `setpts, asetpts <https://ffmpeg.org/ffmpeg-filters.html#setpts_002c-asetpts>`__
def setpts(stream, expr):
    """Change the PTS (presentation timestamp) of the input frames.

    Args:
        expr: The expression which is evaluated for each frame to
            construct its timestamp.

    Official documentation: `setpts, asetpts
    <https://ffmpeg.org/ffmpeg-filters.html#setpts_002c-asetpts>`__
    """
    # The filter name is taken from this function's own name.
    node = FilterNode(stream, setpts.__name__, args=[expr])
    return node.stream()
187,673
Crop the input video. Args: x: The horizontal position, in the input video, of the left edge of the output video. y: The vertical position, in the input video, of the top edge of the output video. width: The width of the output video. Must be greater than 0. he...
def crop(stream, x, y, width, height, **kwargs):
    """Crop the input video.

    Args:
        x: The horizontal position, in the input video, of the left edge
            of the output video.
        y: The vertical position, in the input video, of the top edge of
            the output video.
        width: The width of the output video. Must be greater than 0.
        height: The height of the output video. Must be greater than 0.
        **kwargs: Additional filter options passed through unchanged.
    """
    # The underlying ffmpeg crop filter takes positional args in
    # (width, height, x, y) order.
    node = FilterNode(
        stream, crop.__name__, args=[width, height, x, y], kwargs=kwargs
    )
    return node.stream()
187,676