| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
def logerror(logger: logging.Logger = logging.root):
"""A decorator that wraps the passed in function and logs exceptions.
Parameters
----------
logger: logging.Logger
The logger to which to log the error.
"""
def log_wrapper(function):
@functools.wraps(function)
def wra... | A decorator that wraps the passed in function and logs exceptions.
Parameters
----------
logger: logging.Logger
The logger to which to log the error.
| logerror | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue) | Collect data into fixed-length chunks or blocks | grouper | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
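The `grouper` row above is shown in full, so its padding behavior can be checked directly; a minimal, self-contained usage demo:

```python
import itertools

def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks."""
    args = [iter(iterable)] * n  # n references to ONE iterator, so zip strides by n
    return itertools.zip_longest(*args, fillvalue=fillvalue)

print(list(grouper('ABCDEFG', 3, 'x')))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
```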
def repeat(iterable, count=None):
"""Repeat a basic iterator for multiple rounds
Parameters
----------
iterable
The basic iterable
count
Repeat the basic iterable "count" times. If it is None, the result is an infinite iterator.
Returns
-------
new_iterable
A n... | Repeat a basic iterator for multiple rounds
Parameters
----------
iterable
The basic iterable
count
Repeat the basic iterable "count" times. If it is None, the result is an infinite iterator.
Returns
-------
new_iterable
A new iterable in which the basic iterator h... | repeat | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
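The `repeat` cell is truncated mid-body; a minimal sketch consistent with the docstring (not necessarily the repo's exact code, and assuming the input is re-iterable, e.g. a list):

```python
def repeat(iterable, count=None):
    """Repeat a basic iterable for multiple rounds (forever when count is None)."""
    if count is None:
        while True:
            yield from iterable
    else:
        for _ in range(count):
            yield from iterable

assert list(repeat([1, 2], count=3)) == [1, 2, 1, 2, 1, 2]
```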
def load_checksum_stats(path: str) -> dict:
"""
Parameters
----------
path
Path to the stored checksum
Returns
-------
file_stats
"""
file_stats = dict()
with open(path, 'r', encoding='utf-8') as f:
for line in f:
name, hex_hash, file_size = line.str... |
Parameters
----------
path
Path to the stored checksum
Returns
-------
file_stats
| load_checksum_stats | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def download_file_from_google_drive(file_id, dest_path, overwrite=False, showsize=False):
"""Downloads a shared file from google drive into a given folder.
Optionally unzips it.
Parameters
----------
file_id: str
the file identifier.
You can obtain it fro... | Downloads a shared file from Google Drive into a given folder.
Optionally unzips it.
Parameters
----------
file_id: str
the file identifier.
You can obtain it from the sharable link.
dest_path: str
the destination where to save the downloaded ... | download_file_from_google_drive | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def download(url: str,
path: Optional[str] = None,
overwrite: Optional[bool] = False,
sha1_hash: Optional[str] = None,
retries: Optional[int] = 5,
verify_ssl: Optional[bool] = True,
anonymous_credential: Optional[bool] = True) -> str:
"""... | Download a given URL
Parameters
----------
url
URL to download
path
Destination path to store downloaded file. By default stores to the
current directory with the same name as in url.
overwrite
Whether to overwrite destination file if already exists.
sha1_hash
... | download | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
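The `download` docstring mentions a `sha1_hash` argument for integrity checking; a sketch of how such a check is commonly implemented (the helper name `check_sha1` is hypothetical here, not confirmed by the truncated cell):

```python
import hashlib

def check_sha1(filename: str, sha1_hash: str) -> bool:
    """Return True when the file's SHA-1 digest starts with the expected hex prefix."""
    sha1 = hashlib.sha1()
    with open(filename, 'rb') as f:
        # Stream in 1 MiB chunks so large downloads are never loaded at once.
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha1.update(chunk)
    return sha1.hexdigest().startswith(sha1_hash)
```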
def check_version(min_version: str,
warning_only: bool = False,
library: Optional[ModuleType] = None):
"""Check the version of gluonnlp satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version
... | Check that the version of gluonnlp satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version
Minimum version
warning_only
Printing a warning instead of throwing an exception.
library
The target library for ver... | check_version | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
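A sketch of the version comparison the docstring describes, using `packaging.version`; the actual implementation may parse versions differently, and the `__version__` fallback is an assumption:

```python
import warnings
from packaging.version import parse

def check_version(min_version, warning_only=False, library=None):
    version = getattr(library, '__version__', '0.0.0')  # fallback is an assumption
    if parse(version) < parse(min_version):
        msg = f'Version {version} does not satisfy the minimum {min_version}'
        if warning_only:
            warnings.warn(msg)
        else:
            raise AssertionError(msg)
```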
def init_comm(backend, gpus):
"""Init communication backend
Parameters
----------
backend
The communication backend
gpus
Returns
-------
store
The kvstore
num_workers
The total number of workers
rank
local_rank
is_master_node
ctx_l
"""
... | Init communication backend
Parameters
----------
backend
The communication backend
gpus
Returns
-------
store
The kvstore
num_workers
The total number of workers
rank
local_rank
is_master_node
ctx_l
| init_comm | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def get_mxnet_visible_ctx():
"""Get the visible contexts in MXNet.
- If GPU is available
it will return all the visible GPUs, which can be controlled via "CUDA_VISIBLE_DEVICES".
- If no GPU is available
it will return the cpu device.
Returns
-------
ctx_l
The recommende... | Get the visible contexts in MXNet.
- If GPU is available
it will return all the visible GPUs, which can be controlled via "CUDA_VISIBLE_DEVICES".
- If no GPU is available
it will return the cpu device.
Returns
-------
ctx_l
The recommended contexts to use for MXNet
| get_mxnet_visible_ctx | python | dmlc/gluon-nlp | src/gluonnlp/utils/misc.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/misc.py | Apache-2.0 |
def __init__(self, params=None):
"""Maintain a set of shadow variables "v" that is calculated by
v[:] = (1 - 1/t) v + 1/t \theta
The t is the number of training steps.
It is also known as "Polyak-Ruppert averaging" applied to SGD and was rediscovered in
"Towards Optimal One... | Maintain a set of shadow variables "v" that is calculated by
v[:] = (1 - 1/t) v + 1/t \theta
The t is the number of training steps.
It is also known as "Polyak-Ruppert averaging" applied to SGD and was rediscovered in
"Towards Optimal One Pass Large Scale Learning with Averaged Stoch... | __init__ | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def apply(self, params):
""" Tell the moving average tracker which parameters we are going to track.
Parameters
----------
params : ParameterDict
The parameters that we are going to track and calculate the moving average.
"""
assert self._track_params is None... | Tell the moving average tracker which parameters we are going to track.
Parameters
----------
params : ParameterDict
The parameters that we are going to track and calculate the moving average.
| apply | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def copy_back(self, params=None):
""" Copy the average parameters back to the given parameters
Parameters
----------
params : ParameterDict
The parameters that we will copy the average params to.
If it is not given, the tracked parameters will be updated
... | Copy the average parameters back to the given parameters
Parameters
----------
params : ParameterDict
The parameters that we will copy the average params to.
If it is not given, the tracked parameters will be updated
| copy_back | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def grad_global_norm(parameters: Iterable[Parameter]) -> float:
"""Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`, if `max_norm` is provided.
If gradients exist for more than one context for a parameter, user needs t... | Calculate the 2-norm of gradients of parameters, and how much they should be scaled down
such that their 2-norm does not exceed `max_norm`, if `max_norm` is provided.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients... | grad_global_norm | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def clip_grad_global_norm(parameters: Iterable[Parameter],
max_norm: float,
check_isfinite: bool = True) -> Tuple[float, float, bool]:
"""Rescales gradients of parameters so that the sum of their 2-norm is smaller than `max_norm`.
If gradients exist for more t... | Rescales gradients of parameters so that the sum of their 2-norm is smaller than `max_norm`.
If gradients exist for more than one context for a parameter, user needs to explicitly call
``trainer.allreduce_grads`` so that the gradients are summed first before calculating
the 2-norm.
.. note::
T... | clip_grad_global_norm | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
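A framework-agnostic NumPy sketch of the global-norm clipping both rows describe; the MXNet version operates on `Parameter` gradients per context, but the returned triple mirrors the `Tuple[float, float, bool]` signature (treat the exact ratio convention as an assumption):

```python
import numpy as np

def clip_grad_global_norm(grads, max_norm, check_isfinite=True):
    # Joint 2-norm over ALL gradients, then one shared rescaling ratio.
    total_norm = float(np.sqrt(sum((g ** 2).sum() for g in grads)))
    is_finite = bool(np.isfinite(total_norm)) if check_isfinite else True
    ratio = max_norm / (total_norm + 1e-12)
    if ratio < 1.0:  # only scale down, never up
        for g in grads:
            g *= ratio
    return total_norm, ratio, is_finite
```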
def move_to_ctx(arr, ctx):
"""Move a nested structure of array to the given context
Parameters
----------
arr
The input array
ctx
The MXNet context
Returns
-------
new_arr
The array that has been moved to context
"""
if isinstance(arr, tuple):
re... | Move a nested structure of array to the given context
Parameters
----------
arr
The input array
ctx
The MXNet context
Returns
-------
new_arr
The array that has been moved to context
| move_to_ctx | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def deduplicate_param_dict(param_dict):
"""Get a parameter dict that has been deduplicated
Parameters
----------
param_dict
The parameter dict returned by `model.collect_params()`
Returns
-------
dedup_param_dict
"""
dedup_param_dict = dict()
param_uuid_set = set()
... | Get a parameter dict that has been deduplicated
Parameters
----------
param_dict
The parameter dict returned by `model.collect_params()`
Returns
-------
dedup_param_dict
| deduplicate_param_dict | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def count_parameters(params) -> Tuple[int, int]:
"""
Parameters
----------
params
The input parameter dict
Returns
-------
num_params
The number of parameters that require gradient
num_fixed_params
The number of parameters that do not require gradient
"""... |
Parameters
----------
params
The input parameter dict
Returns
-------
num_params
The number of parameters that require gradient
num_fixed_params
The number of parameters that do not require gradient
| count_parameters | python | dmlc/gluon-nlp | src/gluonnlp/utils/parameter.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/parameter.py | Apache-2.0 |
def get_trimmed_lengths(lengths: List[int],
max_length: int,
do_merge: bool = False) -> np.ndarray:
"""Get the trimmed lengths of multiple text data. It will make sure that
the trimmed length is smaller than or equal to the max_length
- do_merge is True
... | Get the trimmed lengths of multiple text data. It will make sure that
the trimmed length is smaller than or equal to the max_length
- do_merge is True
Make sure that sum(trimmed_lengths) <= max_length.
The strategy is to always try to trim the longer lengths.
- do_merge is False
Mak... | get_trimmed_lengths | python | dmlc/gluon-nlp | src/gluonnlp/utils/preprocessing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/preprocessing.py | Apache-2.0 |
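A plain-NumPy sketch of the two trimming strategies in the docstring; the merged branch greedily shaves the current longest sequence, matching "always try to trim the longer lengths" (exact tie-breaking in the repo may differ):

```python
import numpy as np

def get_trimmed_lengths(lengths, max_length, do_merge=False):
    lengths = np.array(lengths, dtype=np.int64)
    if not do_merge:
        # Each sequence independently: min(length, max_length).
        return np.minimum(lengths, max_length)
    trimmed = lengths.copy()
    while trimmed.sum() > max_length and trimmed.max() > 0:
        trimmed[trimmed.argmax()] -= 1  # shave the longest first
    return trimmed

assert list(get_trimmed_lengths([10, 3], max_length=8, do_merge=True)) == [5, 3]
```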
def match_tokens_with_char_spans(token_offsets: np.ndarray,
spans: np.ndarray) -> np.ndarray:
"""Match the span offsets with the character-level offsets.
For each span, we perform the following:
1: Cutoff the boundary
span[0] = max(span[0], token_offsets[0, 0])
... | Match the span offsets with the character-level offsets.
For each span, we perform the following:
1: Cutoff the boundary
span[0] = max(span[0], token_offsets[0, 0])
span[1] = min(span[1], token_offsets[-1, 1])
2: Find start + end
We try to select the smallest number of tokens that c... | match_tokens_with_char_spans | python | dmlc/gluon-nlp | src/gluonnlp/utils/preprocessing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/preprocessing.py | Apache-2.0 |
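Since token offsets are sorted, the "start + end" search in step 2 can be done with binary search; a hedged sketch, assuming shapes `(num_tokens, 2)` and `(num_spans, 2)` of character offsets (not the repo's exact code):

```python
import numpy as np

def match_tokens_with_char_spans(token_offsets, spans):
    starts, ends = token_offsets[:, 0], token_offsets[:, 1]
    out = np.empty_like(np.asarray(spans))
    for i, (lo, hi) in enumerate(spans):
        lo = max(lo, starts[0])          # step 1: cut off the boundary
        hi = min(hi, ends[-1])
        out[i, 0] = np.searchsorted(ends, lo, side='right')       # first token ending after lo
        out[i, 1] = np.searchsorted(starts, hi, side='left') - 1  # last token starting before hi
    return out
```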
def register(self, *args):
"""
Register the given object under either the nickname or `obj.__name__`. It can be used as
either a decorator or not. See docstring of this class for usage.
"""
if len(args) == 2:
# Register an object with nick name by function call
... |
Register the given object under either the nickname or `obj.__name__`. It can be used as
either a decorator or not. See docstring of this class for usage.
| register | python | dmlc/gluon-nlp | src/gluonnlp/utils/registry.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/registry.py | Apache-2.0 |
def create(self, name: str, *args, **kwargs) -> object:
"""Create the class object with the given args and kwargs
Parameters
----------
name
The name in the registry
args
kwargs
Returns
-------
ret
The created object
... | Create the class object with the given args and kwargs
Parameters
----------
name
The name in the registry
args
kwargs
Returns
-------
ret
The created object
| create | python | dmlc/gluon-nlp | src/gluonnlp/utils/registry.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/registry.py | Apache-2.0 |
def serialize(path, tbl):
"""Serialize tbl with out-of-band data to path for zero-copy shared memory usage.
If the object to be serialized itself, or the objects it uses for data
storage (such as numpy arrays) implement the pickle protocol version 5
pickle.PickleBuffer type in __reduce_ex__, then t... | Serialize tbl with out-of-band data to path for zero-copy shared memory usage.
If the object to be serialized itself, or the objects it uses for data
storage (such as numpy arrays) implement the pickle protocol version 5
pickle.PickleBuffer type in __reduce_ex__, then this function can store
these ... | serialize | python | dmlc/gluon-nlp | src/gluonnlp/utils/shm.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/shm.py | Apache-2.0 |
def load(path):
"""Load serialized object with out-of-band data from path based on zero-copy shared memory.
Parameters
----------
path : pathlib.Path
Folder used to save serialized data with serialize(). Usually a folder /dev/shm
"""
num_buffers = len(list(path.iterdir())) - 1 # exclu... | Load serialized object with out-of-band data from path based on zero-copy shared memory.
Parameters
----------
path : pathlib.Path
Folder used to save serialized data with serialize(). Usually a folder /dev/shm
| load | python | dmlc/gluon-nlp | src/gluonnlp/utils/shm.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/shm.py | Apache-2.0 |
def is_match_states_batch_size(states, states_batch_axis, batch_size) -> bool:
"""Test whether the generated states have the specified batch size
Parameters
----------
states
The states structure
states_batch_axis
The states batch axis structure
batch_size
The batch size... | Test whether the generated states have the specified batch size
Parameters
----------
states
The states structure
states_batch_axis
The states batch axis structure
batch_size
The batch size
Returns
-------
ret
| is_match_states_batch_size | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def verify_nmt_model(model, batch_size: int = 4,
src_seq_length: int = 5,
tgt_seq_length: int = 10,
atol: float = 1E-4,
rtol: float = 1E-3):
"""Verify the correctness of an NMT model. Raise error message if it detects problems.
... | Verify the correctness of an NMT model. Raise error message if it detects problems.
Parameters
----------
model
The machine translation model
batch_size
The batch size to test the nmt model
src_seq_length
Length of the source sequence
tgt_seq_length
Length of the... | verify_nmt_model | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def verify_nmt_inference(train_model, inference_model,
batch_size=4, src_seq_length=5,
tgt_seq_length=10, atol=1E-4, rtol=1E-3):
"""Verify the correctness of an NMT inference model. Raise error message if it detects
any problems.
Parameters
----------
... | Verify the correctness of an NMT inference model. Raise error message if it detects
any problems.
Parameters
----------
train_model
The training model
inference_model
The inference model
batch_size
Batch size
src_seq_length
Length of the source sequence
t... | verify_nmt_inference | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def _cast_nested_to_fp16(nested_dat):
"""Cast the nested input to fp16
Parameters
----------
dat
The input nested data structure
Returns
-------
output
The casted output data
"""
if isinstance(nested_dat, (mx.np.ndarray, np.ndarray)):
if nested_dat.dtype == ... | Cast the nested input to fp16
Parameters
----------
dat
The input nested data structure
Returns
-------
output
The casted output data
| _cast_nested_to_fp16 | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def verify_backbone_fp16(model_cls, cfg, ctx, inputs,
atol=1E-2, rtol=1E-2, check_amp=True):
"""Test whether the backbone model has the comparable parameter gradient +
Parameters
----------
model_cls
The modeling class
cfg
The configuration
ctx
T... | Test whether the backbone model has the comparable parameter gradient +
Parameters
----------
model_cls
The modeling class
cfg
The configuration
ctx
The context
inputs
The input tensors of the model. We will
atol
The absolute tolerance
rtol
... | verify_backbone_fp16 | python | dmlc/gluon-nlp | src/gluonnlp/utils/testing.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/testing.py | Apache-2.0 |
def get_ec2_tvm_flags() -> Dict[str, Dict]:
r"""Return the recommended flags for TVM compilation in AWS EC2 instances.
Including C4, C5, G4, P3.
For more details about AWS EC2 instances, refer to https://aws.amazon.com/ec2/instance-types/.
Returns
-------
info_dict
A dictionary that c... | Return the recommended flags for TVM compilation in AWS EC2 instances.
Including C4, C5, G4, P3.
For more details about AWS EC2 instances, refer to https://aws.amazon.com/ec2/instance-types/.
Returns
-------
info_dict
A dictionary that contains the mapping between instance type and the
... | get_ec2_tvm_flags | python | dmlc/gluon-nlp | src/gluonnlp/utils/tvm_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/tvm_utils.py | Apache-2.0 |
def update_tvm_convert_map() -> None:
"""A Monkey Patch to update convert map in tvm/relay/frontend/mxnet.py"""
op = (('masked_softmax', _mx_masked_softmax),)
_convert_map.update({key: value for key, value in op}) | A Monkey Patch to update convert map in tvm/relay/frontend/mxnet.py | update_tvm_convert_map | python | dmlc/gluon-nlp | src/gluonnlp/utils/tvm_utils.py | https://github.com/dmlc/gluon-nlp/blob/master/src/gluonnlp/utils/tvm_utils.py | Apache-2.0 |
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks"""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue) | Collect data into fixed-length chunks or blocks | grouper | python | dmlc/gluon-nlp | tests/test_gluon_block.py | https://github.com/dmlc/gluon-nlp/blob/master/tests/test_gluon_block.py | Apache-2.0 |
def test_test():
"""Test that fixing a random seed works."""
py_rnd = random.randint(0, 100)
np_rnd = np.random.randint(0, 100)
mx_rnd = mx.nd.random_uniform(shape=(1, )).asscalar()
random.seed(1)
mx.random.seed(1)
np.random.seed(1)
assert py_rnd == random.randint(0, 100)
assert np... | Test that fixing a random seed works. | test_test | python | dmlc/gluon-nlp | tests/test_pytest.py | https://github.com/dmlc/gluon-nlp/blob/master/tests/test_pytest.py | Apache-2.0 |
def is_image_file(filename):
"""Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS) | Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
| is_image_file | python | ajbrock/BigGAN-PyTorch | datasets.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py | MIT |
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
if self.load_in_mem:
img = self.data[index]
target = self.labels[index]
else:
path, target = self.imgs[index]
... |
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
| __getitem__ | python | ajbrock/BigGAN-PyTorch | datasets.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py | MIT |
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# If loaded the entire dataset in RAM, get image from memory
if self.load_in_mem:
img = self.data[index]
target = self.labe... |
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
| __getitem__ | python | ajbrock/BigGAN-PyTorch | datasets.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py | MIT |
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
... |
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
| __getitem__ | python | ajbrock/BigGAN-PyTorch | datasets.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/datasets.py | MIT |
def torch_cov(m, rowvar=False):
'''Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The el... | Estimate a covariance matrix given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element `C_{ij}` is the covariance of
`x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`... | torch_cov | python | ajbrock/BigGAN-PyTorch | inception_utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py | MIT |
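The full `torch_cov` body is truncated, but the docstring pins down the semantics (unbiased sample covariance, variables as columns when `rowvar=False`); a standard implementation consistent with that contract:

```python
import torch

def torch_cov(m, rowvar=False):
    if m.dim() < 2:
        m = m.view(1, -1)
    if not rowvar and m.size(0) != 1:
        m = m.t()                      # variables along rows, observations along columns
    fact = 1.0 / (m.size(1) - 1)       # unbiased: divide by N - 1
    m = m - torch.mean(m, dim=1, keepdim=True)
    return fact * m.matmul(m.t())
```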
def numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1... | Numpy implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
... | numpy_calculate_frechet_distance | python | ajbrock/BigGAN-PyTorch | inception_utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py | MIT |
def torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C... | Pytorch implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params... | torch_calculate_frechet_distance | python | ajbrock/BigGAN-PyTorch | inception_utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/inception_utils.py | MIT |
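Both Frechet-distance rows state the formula d² = ||μ₁ − μ₂||² + Tr(C₁ + C₂ − 2·sqrt(C₁C₂)); a NumPy/SciPy sketch following the bioinf-jku/TTUR reference the docstrings cite (the `eps` stabilization mirrors that code):

```python
import numpy as np
from scipy import linalg

def frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # The product can be near-singular; nudge the diagonals for stability.
        offset = np.eye(sigma1.shape[0]) * eps
        covmean, _ = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset), disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real  # tiny imaginary parts are numerical noise
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)
```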
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
"""
size = (min(img.size), min(img.size))
# Only step forward along this edge if it's the long edge
i = (0 if size[0] == img.size[0]
else np.random.randint(l... |
Args:
img (PIL Image): Image to be cropped.
Returns:
PIL Image: Cropped image.
| __call__ | python | ajbrock/BigGAN-PyTorch | utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py | MIT |
def log(self, record=None, **kwargs):
"""
Assumption: no newlines in the input.
"""
if record is None:
record = {}
record.update(kwargs)
record['_stamp'] = time.time()
with open(self.fname, 'a') as f:
f.write(json.dumps(record, ensure_ascii=True) + '\n') |
Assumption: no newlines in the input.
| log | python | ajbrock/BigGAN-PyTorch | utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py | MIT |
def progress(items, desc='', total=None, min_delay=0.1, displaytype='s1k'):
"""
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no... |
Returns a generator over `items`, printing the number and percentage of
items processed and the estimated remaining processing time before yielding
the next item. `total` gives the total number of items (required if `items`
has no length), and `min_delay` gives the minimum time in seconds between
subsequent ... | progress | python | ajbrock/BigGAN-PyTorch | utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py | MIT |
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in g... | Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
| step | python | ajbrock/BigGAN-PyTorch | utils.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/utils.py | MIT |
def _data_parallel_master(self, intermediates):
"""Reduce the sum and square-sum, compute the statistics, and broadcast it."""
# Always using same "device order" makes the ReduceAdd operation faster.
# Thanks to:: Tete Xiao (http://tetexiao.com/)
intermediates = sorted(intermediates, ke... | Reduce the sum and square-sum, compute the statistics, and broadcast it. | _data_parallel_master | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/batchnorm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/batchnorm.py | MIT |
def _compute_mean_std(self, sum_, ssum, size):
"""Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device."""
assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
mean = sum... | Compute the mean and standard-deviation with sum and square-sum. This method
also maintains the moving average on the master device. | _compute_mean_std | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/batchnorm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/batchnorm.py | MIT |
def __init__(self, master_callback):
"""
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
"""
self._master_callback = master_callback
self._queue = queue.Queue()
self._registry = collections.OrderedDict()
... |
Args:
master_callback: a callback to be invoked after having collected messages from slave devices.
| __init__ | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/comm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py | MIT |
def register_slave(self, identifier):
"""
Register a slave device.
Args:
identifier: an identifier, usually the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
"""
if self._activated:
assert ... |
Register a slave device.
Args:
identifier: an identifier, usually the device id.
Returns: a `SlavePipe` object which can be used to communicate with the master device.
| register_slave | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/comm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py | MIT |
def run_master(self, master_msg):
"""
Main entry for the master device in each forward pass.
The messages were first collected from each device (including the master device), and then
a callback will be invoked to compute the message to be sent back to each device
(including t... |
Main entry for the master device in each forward pass.
The messages were first collected from each device (including the master device), and then
a callback will be invoked to compute the message to be sent back to each device
(including the master device).
Args:
... | run_master | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/comm.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/comm.py | MIT |
def execute_replication_callbacks(modules):
"""
Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphism, we assign eac... |
Execute a replication callback `__data_parallel_replicate__` on each module created by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphism, we assign each sub-module with a context
(shared among multi... | execute_replication_callbacks | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/replicate.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/replicate.py | MIT |
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParal... |
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replic... | patch_replication_callback | python | ajbrock/BigGAN-PyTorch | sync_batchnorm/replicate.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/sync_batchnorm/replicate.py | MIT |
def dump_tfhub_to_hdf5(module_path, hdf5_path, redownload=False):
"""Loads TFHub weights and saves them to intermediate HDF5 file.
Args:
module_path ([Path-like]): Path to TFHub module.
hdf5_path ([Path-like]): Path to output HDF5 file.
Returns:
[h5py.File]: Loaded hdf5 file containing module weight... | Loads TFHub weights and saves them to intermediate HDF5 file.
Args:
module_path ([Path-like]): Path to TFHub module.
hdf5_path ([Path-like]): Path to output HDF5 file.
Returns:
[h5py.File]: Loaded hdf5 file containing module weights.
| dump_tfhub_to_hdf5 | python | ajbrock/BigGAN-PyTorch | TFHub/converter.py | https://github.com/ajbrock/BigGAN-PyTorch/blob/master/TFHub/converter.py | MIT |
def read_img(t_imgfname, input_size, img_mean): # optional pre-processing arguments
"""Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of... | Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale the images prior
to rand... | read_img | python | iyah4888/SIGGRAPH18SSS | main_hyper.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/main_hyper.py | MIT |
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Number of ... | Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
| get_arguments | python | iyah4888/SIGGRAPH18SSS | parse_opt.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/parse_opt.py | MIT |
def __init__(self, sess, args):
"""Initialize the parameters.
sess: tensorflow session
"""
self.sess = sess
self.batch_size = args.batch_size
self.args = args
# parameters used to save a checkpoint
self.dataset = "Hypcol"
self.options = []
self._attrs = ['batch_size', 'dataset']
self.build_mo... | Initialize the parameters.
sess: tensorflow session
| __init__ | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/hc_deeplab.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/hc_deeplab.py | MIT |
def image_scaling(img, label):
"""
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
img: Training image to scale.
label: Segmentation mask to scale.
"""
scale = tf.random_uniform([1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None)
h_new = tf.to... |
Randomly scales the images between 0.5 to 1.5 times the original size.
Args:
img: Training image to scale.
label: Segmentation mask to scale.
| image_scaling | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def image_mirroring(img, label):
"""
Randomly mirrors the images.
Args:
img: Training image to mirror.
label: Segmentation mask to mirror.
"""
distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32)[0]
mirror = tf.less(tf.stack([1.0, distort_left_right_rand... |
Randomly mirrors the images.
Args:
img: Training image to mirror.
label: Segmentation mask to mirror.
| image_mirroring | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def random_crop_and_pad_image_and_labels(image, label, crop_h, crop_w, ignore_label=255):
"""
Randomly crops and pads the input images.
Args:
image: Training image to crop/ pad.
label: Segmentation mask to crop/ pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment... |
Randomly crops and pads the input images.
Args:
image: Training image to crop/ pad.
label: Segmentation mask to crop/ pad.
crop_h: Height of cropped segment.
crop_w: Width of cropped segment.
ignore_label: Label to ignore during the training.
| random_crop_and_pad_image_and_labels | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def read_labeled_image_list(data_dir, data_list):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two l... | Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respective... | read_labeled_image_list | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def read_data_list(data_dir, data_list, ext):
"""Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists... | Reads txt file containing paths to images and ground truth masks.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
Returns:
Two lists with all file names for images and masks, respective... | read_data_list | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def read_images_from_disk(input_queue, input_size, random_scale, random_mirror, ignore_label, img_mean): # optional pre-processing arguments
"""Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a... | Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale th... | read_images_from_disk | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def __init__(self, data_dir, data_list, input_size,
random_scale, random_mirror, ignore_label, img_mean, coord):
'''Initialise an ImageReader.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/... | Initialise an ImageReader.
Args:
data_dir: path to the directory with images and masks.
data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
input_size: a tuple with (height, width) values, to which all the images will be resized.
ra... | __init__ | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def dequeue(self, num_elements):
'''Pack images and labels into a batch.
Args:
num_elements: the batch size.
Returns:
Two tensors of size (batch_size, h, w, {3, 1}) for images and masks.'''
image_batch, label_batch = tf.train.batch([self.image, sel... | Pack images and labels into a batch.
Args:
num_elements: the batch size.
Returns:
Two tensors of size (batch_size, h, w, {3, 1}) for images and masks. | dequeue | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def read_an_image_from_disk(t_imgfname, t_labelfname, input_size, random_scale, random_mirror, ignore_label, img_mean): # optional pre-processing arguments
"""Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
... | Read one image and its corresponding mask with optional pre-processing.
Args:
input_queue: tf queue with paths to the image and its mask.
input_size: a tuple with (height, width) values.
If not given, return images of original size.
random_scale: whether to randomly scale th... | read_an_image_from_disk | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/image_reader.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/image_reader.py | MIT |
def setup(self, is_training, num_classes):
'''Network definition.
Args:
is_training: whether to update the running mean and variance of the batch normalisation layer.
If the batch size is small, it is better to keep the running mean and variance of
... | Network definition.
Args:
is_training: whether to update the running mean and variance of the batch normalisation layer.
If the batch size is small, it is better to keep the running mean and variance of
the pre-trained model frozen.
num_... | setup | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/model.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/model.py | MIT |
def decode_labels(mask, num_images=1, num_classes=21):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
... | Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the ... | decode_labels | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/utils.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py | MIT |
def prepare_label(input_batch, new_size, num_classes, one_hot=True):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
num_classes: number of classes to predict (including background).
... | Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
num_classes: number of classes to predict (including background).
one_hot: whether to perform one-hot encoding.
Returns:
Outputs a te... | prepare_label | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/utils.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py | MIT |
def inv_preprocess(imgs, num_images, img_mean):
"""Inverse preprocessing of the batch of images.
Add the mean vector and convert from BGR to RGB.
Args:
imgs: batch of input images.
num_images: number of images to apply the inverse transformations on.
img_mean: vector of mean col... | Inverse preprocessing of the batch of images.
Add the mean vector and convert from BGR to RGB.
Args:
imgs: batch of input images.
num_images: number of images to apply the inverse transformations on.
img_mean: vector of mean colour values.
Returns:
The batch of the size... | inv_preprocess | python | iyah4888/SIGGRAPH18SSS | deeplab_resnet/utils.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/deeplab_resnet/utils.py | MIT |
def __init__(self, def_path, phase='test'):
'''
def_path: Path to the model definition (.prototxt)
data_path: Path to the model data (.caffemodel)
phase: Either 'test' or 'train'. Used for filtering phase-specific nodes.
'''
self.def_path = def_path
self.phase = p... |
def_path: Path to the model definition (.prototxt)
data_path: Path to the model data (.caffemodel)
phase: Either 'test' or 'train'. Used for filtering phase-specific nodes.
| __init__ | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def load(self):
'''Load the layer definitions from the prototxt.'''
self.params = get_caffe_resolver().NetParameter()
with open(self.def_path, 'rb') as def_file:
text_format.Merge(def_file.read(), self.params) | Load the layer definitions from the prototxt. | load | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def filter_layers(self, layers):
'''Filter out layers based on the current phase.'''
phase_map = {0: 'train', 1: 'test'}
filtered_layer_names = set()
filtered_layers = []
for layer in layers:
phase = self.phase
if len(layer.include):
phase ... | Filter out layers based on the current phase. | filter_layers | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def make_node(self, layer):
'''Create a graph node for the given layer.'''
kind = NodeKind.map_raw_kind(layer.type)
if kind is None:
raise KaffeError('Unknown layer type encountered: %s' % layer.type)
# We want to use the layer's top names (the "output" names), rather than th... | Create a graph node for the given layer. | make_node | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def make_input_nodes(self):
'''
Create data input nodes.
This method is for old-style inputs, where the input specification
was not treated as a first-class layer in the prototxt.
Newer models use the "Input layer" type.
'''
nodes = [Node(name, NodeKind.Data) fo... |
Create data input nodes.
This method is for old-style inputs, where the input specification
was not treated as a first-class layer in the prototxt.
Newer models use the "Input layer" type.
| make_input_nodes | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def build(self):
'''
Builds the graph from the Caffe layer definitions.
'''
# Get the layers
layers = self.params.layers or self.params.layer
# Filter out phase-excluded layers
layers = self.filter_layers(layers)
# Get any separately-specified input layers... |
Builds the graph from the Caffe layer definitions.
| build | python | iyah4888/SIGGRAPH18SSS | kaffe/graph.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/graph.py | MIT |
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict... | Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
| load | python | iyah4888/SIGGRAPH18SSS | kaffe/tensorflow/network.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py | MIT |
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):... | Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
| feed | python | iyah4888/SIGGRAPH18SSS | kaffe/tensorflow/network.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py | MIT |
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in list(self.layers.items())) + 1
return '%s_%d' % (prefix, ident... | Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
| get_unique_name | python | iyah4888/SIGGRAPH18SSS | kaffe/tensorflow/network.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/network.py | MIT |
def get_padding_type(kernel_params, input_shape, output_shape):
'''Translates Caffe's numeric padding to one of ('SAME', 'VALID').
Caffe supports arbitrary padding values, while TensorFlow only
supports 'SAME' and 'VALID' modes. So, not all Caffe paddings
can be translated to TensorFlow. There are some ... | Translates Caffe's numeric padding to one of ('SAME', 'VALID').
Caffe supports arbitrary padding values, while TensorFlow only
supports 'SAME' and 'VALID' modes. So, not all Caffe paddings
can be translated to TensorFlow. There are some subtleties to
how the padding edge-cases are handled. These are des... | get_padding_type | python | iyah4888/SIGGRAPH18SSS | kaffe/tensorflow/transformer.py | https://github.com/iyah4888/SIGGRAPH18SSS/blob/master/kaffe/tensorflow/transformer.py | MIT |
def run(
self,
query="What is a lagrangian?",
limit_broad_results=1_000,
limit_deduped_url_results=50,
limit_hierarchical_url_results=50,
limit_final_pagerank_results=20,
url_contains_filter=None,
):
"""Run a search query using the WebSearchEngine clie... | Run a search query using the WebSearchEngine client | run | python | SciPhi-AI/agent-search | agent_search/app/server.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/app/server.py | Apache-2.0 |
def to_string_dict(self) -> dict:
"""Returns a dictionary representation with all values as strings."""
return {
"score": str(self.score),
"url": self.url,
"title": self.title,
"dataset": self.dataset,
"metadata": self.metadata,
"te... | Returns a dictionary representation with all values as strings. | to_string_dict | python | SciPhi-AI/agent-search | agent_search/core/search_types.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/search_types.py | Apache-2.0 |
def select_top_urls(
ordered_points: List[AgentSearchResult],
max_urls: int = 10,
url_contains: Optional[List[str]] = None,
) -> List[str]:
"""A function to return the top unique URLs from the given poitns results."""
if not url_contains:
url_contains = []
top_urls = set([])
for poi... | A function to return the top unique URLs from the given points results. | select_top_urls | python | SciPhi-AI/agent-search | agent_search/core/utils.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/utils.py | Apache-2.0 |
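A hedged completion of the truncated loop; the row's snippet initializes a `set`, but a list is used here to keep the "top" ranking order explicit (treat the exact data structure and filter semantics as assumptions):

```python
def select_top_urls(ordered_points, max_urls=10, url_contains=None):
    """Return the top unique URLs from the given points results."""
    url_contains = url_contains or []
    top_urls = []
    for point in ordered_points:
        if point.url in top_urls:
            continue  # deduplicate
        if url_contains and not any(substr in point.url for substr in url_contains):
            continue  # optional substring filter
        top_urls.append(point.url)
        if len(top_urls) >= max_urls:
            break
    return top_urls
```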
def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
"""Compute the cosine similarity between two vectors."""
dot_product = np.dot(v1, v2)
norm_v1 = np.linalg.norm(v1)
norm_v2 = np.linalg.norm(v2)
return dot_product / (norm_v1 * norm_v2) | Compute the cosine similarity between two vectors. | cosine_similarity | python | SciPhi-AI/agent-search | agent_search/core/utils.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/core/utils.py | Apache-2.0 |
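This row's code is complete, so a direct usage check works; here the dot product is 1 and both norms are √2, giving 0.5:

```python
import numpy as np

def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
    """Compute the cosine similarity between two vectors."""
    return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))

print(cosine_similarity(np.array([1.0, 0.0, 1.0]), np.array([1.0, 1.0, 0.0])))  # 0.5
```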
def __init__(
self,
api_base: Optional[str] = None,
api_key: Optional[str] = None,
timeout: int = 30,
) -> None:
"""
Initializes the SciPhi client.
Args:
api_base (Optional[str]): Base URL for the SciPhi API.
api_key (Optional[str]): A... |
Initializes the SciPhi client.
Args:
api_base (Optional[str]): Base URL for the SciPhi API.
api_key (Optional[str]): API key for authenticating requests.
timeout (int): Timeout for API requests in seconds.
Raises:
ValueError: If `api_key` is not... | __init__ | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def _handle_api_response(self, response: httpx.Response) -> Dict:
"""
Handles the HTTP response from the API.
Args:
response (httpx.Response): The response from the API request.
Returns:
Dict: JSON response content.
Raises:
Exception: If the... |
Handles the HTTP response from the API.
Args:
response (httpx.Response): The response from the API request.
Returns:
Dict: JSON response content.
Raises:
Exception: If the response indicates an error.
| _handle_api_response | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def _handle_search_response(self, search_results: Dict[str, str]) -> None:
"""
Handles dictionary search responses from the API.
Args:
search_results (Dict[str, str]): The response from the API request.
Returns:
Dict: JSON response content.
Raises:
... |
Handles dictionary search responses from the API.
Args:
search_results (Dict[str, str]): The response from the API request.
Returns:
Dict: JSON response content.
Raises:
Exception: If the response indicates an error.
| _handle_search_response | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def _retry_api_request(
self, method: str, url: str, payload: Dict, max_retries: int = 3
):
"""
Common method for retrying API requests with exponential backoff.
Args:
method (str): The HTTP method to use ('get' or 'post').
url (str): The API endpoint.
... |
Common method for retrying API requests with exponential backoff.
Args:
method (str): The HTTP method to use ('get' or 'post').
url (str): The API endpoint.
payload (Dict): The payload for the request.
max_retries (int): Maximum number of retry attempts.... | _retry_api_request | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
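The retry helper above is truncated; a sketch of exponential backoff around `httpx`, consistent with the docstring (the function body is an assumption, not the SciPhi client's actual code):

```python
import time
import httpx

def retry_api_request(method: str, url: str, payload: dict, max_retries: int = 3):
    for attempt in range(max_retries):
        try:
            with httpx.Client(timeout=30) as client:
                response = client.request(method, url, json=payload)
                response.raise_for_status()
                return response.json()
        except httpx.HTTPError:
            if attempt == max_retries - 1:
                raise
            time.sleep(2 ** attempt)  # back off 1s, 2s, 4s, ...
```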
def search(
self, query: str, search_provider: str, max_retries: int = 3
) -> List[Dict]:
"""
Performs a search query using the SciPhi API with retry and backoff logic.
Args:
query (str): The search query string.
search_provider (str): The search provider to ... |
Performs a search query using the SciPhi API with retry and backoff logic.
Args:
query (str): The search query string.
search_provider (str): The search provider to use.
max_retries (int): Maximum number of retry attempts.
Returns:
List[Dict]: A lis... | search | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def get_search_rag_response(
self,
query: str,
search_provider: str,
llm_model: str = "SciPhi/Sensei-7B-V1",
temperature: int = 0.2,
top_p: int = 0.95,
):
"""
Retrieves a search RAG (Retrieval-Augmented Generation) response from the API.
Args:... |
Retrieves a search RAG (Retrieval-Augmented Generation) response from the API.
Args:
query (str): The search query string.
search_provider (str): The search provider to use.
llm_model (str): The language model to use.
temperature (int): The temperature s... | get_search_rag_response | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def completion(
self,
prompt: str,
llm_model_name: str = "SciPhi/Sensei-7B-V1",
llm_max_tokens_to_sample: int = 1_024,
llm_temperature: float = 0.2,
llm_top_p: float = 0.90,
) -> SearchRAGResponse:
"""
Generates a completion for a given prompt using th... |
Generates a completion for a given prompt using the SciPhi API.
Args:
prompt (str): The prompt for generating completion.
llm_model_name (str): The language model to use.
llm_max_tokens_to_sample (int): Maximum number of tokens for the sample.
llm_temper... | completion | python | SciPhi-AI/agent-search | agent_search/providers/sciphi.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/providers/sciphi.py | Apache-2.0 |
def process_rows(rows, output_queue):
"""Process the rows into qdrant point objects."""
qdrant_points = []
for row in rows:
_, url, __, text_chunks, embeddings_binary, ___, ____ = row
embeddings = np.frombuffer(
embeddings_binary, dtype=np.float32
).reshape(-1, EMBEDDING_... | Process the rows into qdrant point objects. | process_rows | python | SciPhi-AI/agent-search | agent_search/scripts/populate_qdrant_from_postgres.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py | Apache-2.0 |
def qdrant_writer(config, qdrant_queue, delete_existing):
"""A writer that listens for output events in a separate thread."""
qclient = QdrantClient(
config["qdrant_host"],
port=config["qdrant_grpc_port"],
prefer_grpc=config["qdrant_prefer_grpc"],
)
if delete_existing:
qc... | A writer that listens for output events in a separate thread. | qdrant_writer | python | SciPhi-AI/agent-search | agent_search/scripts/populate_qdrant_from_postgres.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py | Apache-2.0 |
def process_batches(config, start, end, batch_size, output_queue):
"""Processes the batches in steps of the given batch_size"""
# Connect to the database
conn = psycopg2.connect(
dbname=config["postgres_db"],
user=config["postgres_user"],
password=config["postgres_password"],
... | Processes the batches in steps of the given batch_size | process_batches | python | SciPhi-AI/agent-search | agent_search/scripts/populate_qdrant_from_postgres.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py | Apache-2.0 |
def run(self, num_processes=16, batch_size=1_024, delete_existing=False):
"""Runs the population process for the qdrant database"""
qdrant_queue = multiprocessing.Queue()
qdrant_writer_thread = multiprocessing.Process(
target=qdrant_writer,
args=(
self.con... | Runs the population process for the qdrant database | run | python | SciPhi-AI/agent-search | agent_search/scripts/populate_qdrant_from_postgres.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/scripts/populate_qdrant_from_postgres.py | Apache-2.0 |
def hierarchical_similarity_reranking(
self,
query_vector: np.ndarray,
urls: List[str],
limit: int = 100,
) -> List[AgentSearchResult]:
"""Hierarchical URL search to find the most similar text chunk for the given query and URLs"""
results = self.execute_batch_query(ur... | Hierarchical URL search to find the most similar text chunk for the given query and URLs | hierarchical_similarity_reranking | python | SciPhi-AI/agent-search | agent_search/search/base.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/search/base.py | Apache-2.0 |
def pagerank_reranking(
self,
similarity_results: List[AgentSearchResult],
limit: int = 100,
) -> List[AgentSearchResult]:
"""Reranks the results based on the PageRank score of the domain"""
if not self.pagerank_rerank_module:
raise Exception(
"Pag... | Reranks the results based on the PageRank score of the domain | pagerank_reranking | python | SciPhi-AI/agent-search | agent_search/search/base.py | https://github.com/SciPhi-AI/agent-search/blob/master/agent_search/search/base.py | Apache-2.0 |
def scrub_str(string):
"""
The purpose of this function is to scrub the weird template mark-up out of strings
that Veekun is using for their pokedex.
Example:
[]{move:dragon-tail} will effect the opponents [HP]{mechanic:hp}.
Becomes:
dragon tail will effect the opponents HP.
If ... |
The purpose of this function is to scrub the weird template mark-up out of strings
that Veekun is using for their pokedex.
Example:
[]{move:dragon-tail} will effect the opponents [HP]{mechanic:hp}.
Becomes:
dragon tail will effect the opponents HP.
If you find this results in weird... | scrub_str | python | PokeAPI/pokeapi | data/v2/build.py | https://github.com/PokeAPI/pokeapi/blob/master/data/v2/build.py | BSD-3-Clause |
def __SectionLength(this):
"""(4 bytes) Gets the length of characters the given section is"""
offset = this.__SectionDataOffset
return struct.unpack_from("<I", this.__data, offset)[0] | (4 bytes) Gets the length of characters the given section is | __SectionLength | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
def __LineOffsets(this):
"""Figures out the offset for each entry based on the data section offset"""
result = [None] * this.__LineCount
sdo = int(this.__SectionDataOffset)
for i in range(0, len(result)):
result[i] = TextLine()
result[i].offset = struct.unpack_from("<i", this.__data, (i * 8) + sdo + 4)[0]... | Figures out the offset for each entry based on the data section offset | __LineOffsets | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
def HashFNV1_64(this, word):
"""Fowler-Noll-Vo hash function; 64-bit"""
fnvPrime_64 = 0x100000001b3
offsetBasis_64 = 0xCBF29CE484222645
hash = offsetBasis_64
for c in word:
hash = hash ^ ord(c)
# Cast hash to a 64-bit value
hash = (hash * fnvPrime_64) % 2**64
return hash | Fowler-Noll-Vo hash function; 64-bit | HashFNV1_64 | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
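Note that the loop xors each character before multiplying by the prime, which is the FNV-1a ordering despite the `FNV1` name; a standalone version for experimentation:

```python
def fnv_64(word: str) -> int:
    fnv_prime = 0x100000001b3
    h = 0xCBF29CE484222645          # 64-bit offset basis
    for c in word:
        h = h ^ ord(c)              # xor first, then multiply: FNV-1a ordering
        h = (h * fnv_prime) % 2**64
    return h

assert fnv_64('') == 0xCBF29CE484222645  # empty input returns the offset basis
```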
def __LineData(this, data):
"""Loads the file into a list to later decrypt"""
key = copy.copy(this.__KEY_BASE)
result = [None] * this.__LineCount
lines = this.__LineOffsets
for i in range(0, len(lines)):
# Make a list twice the size of the current text line size
encrypted = lines[i].length * 2
# The... | Loads the file into a list to later decrypt | __LineData | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
def __CryptLineData(this, data, key):
"""Decrypts the given line into a list of bytes"""
copied = copy.copy(data)
result = [None] * len(copied)
for i in range(0, len(copied), 2):
result[i] = copied[i] ^ (key % 256)
result[i + 1] = copied[i + 1] ^ ((key >> 8) % 256)
# Bit-shift and OR key, then cast to... | Decrypts the given line into a list of bytes | __CryptLineData | python | PokeAPI/pokeapi | Resources/scripts/data/gen8/read_swsh.py | https://github.com/PokeAPI/pokeapi/blob/master/Resources/scripts/data/gen8/read_swsh.py | BSD-3-Clause |
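The decryption is a symmetric XOR stream over 2-byte units with a rolling 16-bit key; a sketch of the idea (the left-rotate-by-3 key advance is an assumption based on similar Pokémon text tooling, since the truncated cell hides the real step, and 0x7C89 is just an example key):

```python
def crypt_line_data(data: bytes, key: int) -> bytes:
    """XOR each 16-bit code unit with a rolling key; the same routine en/decrypts."""
    out = bytearray(len(data))
    for i in range(0, len(data), 2):
        out[i] = data[i] ^ (key & 0xFF)                 # low byte of the key
        out[i + 1] = data[i + 1] ^ ((key >> 8) & 0xFF)  # high byte of the key
        key = ((key << 3) | (key >> 13)) & 0xFFFF       # assumed: rotate key left by 3
    return bytes(out)

assert crypt_line_data(crypt_line_data(b'\x10\x20\x30\x40', 0x7C89), 0x7C89) == b'\x10\x20\x30\x40'
```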