| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q271900 | CursesMenu.draw | test | def draw(self):
"""
Redraws the menu and refreshes the screen. Should be called whenever something changes that needs to be redrawn.
"""
self.screen.border(0)
if self.title is not None:
self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)
if self.subtitle ... | python | {
"resource": ""
} |
q271901 | CursesMenu.process_user_input | test | def process_user_input(self):
"""
Gets the next single character and decides what to do with it
"""
user_input = self.get_input()
go_to_max = ord("9") if len(self.items) >= 9 else ord(str(len(self.items)))
if ord('1') <= user_input <= go_to_max:
self.go_to(u... | python | {
"resource": ""
} |
q271902 | CursesMenu.select | test | def select(self):
"""
Select the current item and run it
"""
self.selected_option = self.current_option
self.selected_item.set_up()
self.selected_item.action()
self.selected_item.clean_up()
self.returned_value = self.selected_item.get_return()
self... | python | {
"resource": ""
} |
q271903 | parse_old_menu | test | def parse_old_menu(menu_data):
"""
Take an old-style menuData dictionary and return a CursesMenu
:param dict menu_data:
:return: A new CursesMenu
:rtype: CursesMenu
"""
menu_title = menu_data['title']
menu = CursesMenu(menu_title)
for item in menu_data["options"]:
item_type ... | python | {
"resource": ""
} |
q271904 | top | test | def top(
df,
value: str,
limit: int,
order: str = 'asc',
group: Union[str, List[str]] = None
):
"""
    Get the top or flop N results based on a column value for each of the specified group columns
---
### Parameters
*mandatory :*
- `value` (*str*): column name on... | python | {
"resource": ""
} |
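
The `top` row above is truncated before its body; as a purely illustrative sketch (the real argument semantics, e.g. whether `'asc'` selects the top or the flop rows, are not visible in the row), the documented idea of keeping N rows per group based on a column value could look like this with pandas:

```python
import pandas as pd

def top_sketch(df, value, limit, order="asc", group=None):
    # Hypothetical sketch: sort by the value column, then keep the first
    # `limit` rows overall, or per group when `group` is given.
    df_sorted = df.sort_values(by=value, ascending=(order == "asc"))
    if group is None:
        return df_sorted.head(limit)
    return df_sorted.groupby(group).head(limit)

df = pd.DataFrame({"shop": ["a", "a", "a", "b", "b"],
                   "sales": [3, 1, 2, 10, 5]})
print(top_sketch(df, value="sales", limit=2, order="desc", group="shop"))
```
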
q271905 | top_group | test | def top_group(
df,
aggregate_by: List[str],
value: str,
limit: int,
order: str = 'asc',
function: str = 'sum',
group: Union[str, List[str]] = None
):
"""
    Get the top or flop N results based on a function and a column value that aggregates the input.
The... | python | {
"resource": ""
} |
q271906 | convert_str_to_datetime | test | def convert_str_to_datetime(df, *, column: str, format: str):
"""
Convert string column into datetime column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to format
- `format` (*str*): current format of the values (see [available formats](
https://docs.python... | python | {
"resource": ""
} |
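
The `convert_str_to_datetime` row only shows the docstring; below is a minimal sketch of the described behaviour, assuming it is a thin wrapper around `pd.to_datetime` (which the truncated body does not confirm):

```python
import pandas as pd

def convert_str_to_datetime_sketch(df, *, column: str, format: str):
    # Hypothetical re-implementation: parse the string column in place
    # using the supplied strftime/strptime format.
    df[column] = pd.to_datetime(df[column], format=format)
    return df

df = pd.DataFrame({"day": ["2018-01-02", "2018-01-03"]})
df = convert_str_to_datetime_sketch(df, column="day", format="%Y-%m-%d")
print(df.dtypes)  # day: datetime64[ns]
```
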
q271907 | convert_datetime_to_str | test | def convert_datetime_to_str(df, *, column: str, format: str, new_column: str = None):
"""
Convert datetime column into string column
---
### Parameters
*mandatory :*
- column (*str*): name of the column to format
- format (*str*): format of the result values (see [available formats](
... | python | {
"resource": ""
} |
q271908 | change_date_format | test | def change_date_format(
df, *,
column: str,
output_format: str,
input_format: str = None,
new_column: str = None,
new_time_zone=None
):
"""
Convert the format of a date
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to c... | python | {
"resource": ""
} |
q271909 | cast | test | def cast(df, column: str, type: str, new_column=None):
"""
    Convert the column's type into the given `type`
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to convert
- `type` (*str*): output type. It can be :
- `"int"` : integer type
- `"float"` : general number ty... | python | {
"resource": ""
} |
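
For the `cast` row, here is a hedged sketch of the documented conversion using `Series.astype`; the handling of the optional `new_column` argument is an assumption, since the body is cut off:

```python
import pandas as pd

def cast_sketch(df, column: str, type: str, new_column=None):
    # Hypothetical sketch: write the converted values into `new_column`
    # (or overwrite `column` when no new name is given).
    new_column = new_column or column
    df[new_column] = df[column].astype(type)
    return df

df = pd.DataFrame({"value": ["1", "2", "3"]})
print(cast_sketch(df, "value", "int")["value"].sum())  # 6
```
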
q271910 | rank | test | def rank(
df,
value_cols: Union[str, List[str]],
group_cols: List[str] = None,
rank_cols_names: List[str] = None,
method='min',
ascending: bool = True
):
"""
This function creates rank columns based on numeric values to be ranked.
---
### Parameters
... | python | {
"resource": ""
} |
q271911 | waterfall | test | def waterfall(
df,
date: str,
value: str,
start: Dict[str, str],
end: Dict[str, str],
upperGroup: Dict[str, str],
insideGroup: Dict[str, str] = None,
filters: List[str] = None
):
"""
Return a line for each bars of a waterfall chart, totals, groups,... | python | {
"resource": ""
} |
q271912 | _basic_math_operation | test | def _basic_math_operation(df, new_column, column_1, column_2, op):
"""
Basic mathematical operation to apply operator on `column_1` and `column_2`
Both can be either a number or the name of a column of `df`
Will create a new column named `new_column`
"""
if not isinstance(column_1, (str, int, fl... | python | {
"resource": ""
} |
q271913 | round_values | test | def round_values(df, *, column: str, decimals: int, new_column: str = None):
"""
Round each value of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to round
    - `decimals` (*int*): number of decimals to keep
*optional :*
- `new_column` (*str*): nam... | python | {
"resource": ""
} |
q271914 | absolute_values | test | def absolute_values(df, *, column: str, new_column: str = None):
"""
Get the absolute numeric value of each element of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column
*optional :*
- `new_column` (*str*): name of the column containing the result.
... | python | {
"resource": ""
} |
q271915 | pivot | test | def pivot(df, index: List[str], column: str, value: str, agg_function: str = 'mean'):
"""
Pivot the data. Reverse operation of melting
---
### Parameters
*mandatory :*
- `index` (*list*): names of index columns.
- `column` (*str*): column name to pivot on
- `value` (*str*): column nam... | python | {
"resource": ""
} |
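
The `pivot` row describes the reverse of melting; a small sketch of that operation with `pandas.pivot_table` follows (the real function may differ in how it resets the index or names the resulting columns):

```python
import pandas as pd

def pivot_sketch(df, index, column, value, agg_function="mean"):
    # Hypothetical sketch: one row per index combination, one column per
    # distinct value of `column`, aggregated with `agg_function`.
    pivoted = pd.pivot_table(df, index=index, columns=column,
                             values=value, aggfunc=agg_function)
    return pivoted.reset_index()

df = pd.DataFrame({
    "city": ["Paris", "Paris", "Lyon", "Lyon"],
    "year": [2020, 2021, 2020, 2021],
    "sales": [10, 12, 7, 9],
})
print(pivot_sketch(df, index=["city"], column="year", value="sales"))
```
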
q271916 | pivot_by_group | test | def pivot_by_group(
df,
variable,
value,
new_columns,
groups,
id_cols=None
):
"""
Pivot a dataframe by group of variables
---
### Parameters
*mandatory :*
* `variable` (*str*): name of the column used to create the groups.
* `value` (*str*):... | python | {
"resource": ""
} |
q271917 | groupby | test | def groupby(df, *, group_cols: Union[str, List[str]],
aggregations: Dict[str, Union[str, List[str]]]):
"""
Aggregate values by groups.
---
### Parameters
*mandatory :*
- `group_cols` (*list*): list of columns used to group data
    - `aggregations` (*dict*): dictionary of values ... | python | {
"resource": ""
} |
q271918 | cumsum | test | def cumsum(df, new_column: str, column: str, index: list, date_column: str, date_format: str):
"""
DEPRECATED - please use `compute_cumsum` instead
"""
logging.getLogger(__name__).warning(f"DEPRECATED: use compute_cumsum")
date_temp = '__date_temp__'
if isinstance(index, str):
index = [i... | python | {
"resource": ""
} |
q271919 | add_missing_row | test | def add_missing_row(
df: pd.DataFrame,
id_cols: List[str],
reference_col: str,
complete_index: Union[Dict[str, str], List[str]] = None,
method: str = None,
cols_to_keep: List[str] = None
) -> pd.DataFrame:
"""
    Add missing rows to a df based on a reference column
---
### Parameter... | python | {
"resource": ""
} |
q271920 | catch | test | def catch(logger):
"""
    Decorator to catch an exception without re-raising it.
Logs information if a decorator failed.
Note:
We don't want possible exceptions during logging to be raised.
This is used to decorate any function that gets executed
before or after the execution of the d... | python | {
"resource": ""
} |
q271921 | log_message | test | def log_message(logger, message=""):
"""
Decorator to log a message before executing a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
_log_message(logger, func.__name__, message)
result = func(*args, **kwargs)
return resul... | python | {
"resource": ""
} |
q271922 | log_time | test | def log_time(logger):
"""
Decorator to log the execution time of a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
_log_time(logger, func.__na... | python | {
"resource": ""
} |
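
The `log_time` row shows the decorator's timing skeleton but cuts off before the private `_log_time` helper; here is a self-contained variant with a plain `logger.info` call standing in for that helper (an assumption about its output format):

```python
import logging
import time
from functools import wraps

def log_time(logger):
    """Decorator to log the execution time of a function (standalone sketch)."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            end = time.time()
            # The original calls a private _log_time helper; a plain
            # logger.info call stands in for it here.
            logger.info("%s took %.3f s", func.__name__, end - start)
            return result
        return wrapper
    return decorator

logging.basicConfig(level=logging.INFO)

@log_time(logging.getLogger(__name__))
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)
```
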
q271923 | log_shapes | test | def log_shapes(logger):
"""
Decorator to log the shapes of input and output dataframes
It considers all the dataframes passed either as arguments or keyword arguments as inputs
and all the dataframes returned as outputs.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, *... | python | {
"resource": ""
} |
q271924 | rename | test | def rename(
df,
values: Dict[str, Dict[str, str]] = None,
columns: Dict[str, Dict[str, str]] = None,
locale: str = None
):
"""
Replaces data values and column names according to the locale
---
### Parameters
- `values` (optional: dict):
- key: term to be re... | python | {
"resource": ""
} |
q271925 | compute_cumsum | test | def compute_cumsum(
df,
id_cols: List[str],
reference_cols: List[str],
value_cols: List[str],
new_value_cols: List[str] = None,
cols_to_keep: List[str] = None
):
"""
Compute cumsum for a group of columns.
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the colum... | python | {
"resource": ""
} |
q271926 | combine_columns_aggregation | test | def combine_columns_aggregation(
df,
id_cols: List[str],
cols_for_combination: Dict[str, str],
agg_func: Union[str, List[str], Dict[str, str]] = 'sum'
):
"""
Aggregates data to reproduce "All" category for requester
---
### Parameters
*mandatory :*
- `id_cols` ... | python | {
"resource": ""
} |
q271927 | get_param_value_from_func_call | test | def get_param_value_from_func_call(param_name, func, call_args, call_kwargs):
"""
Get the value of a function's parameter based on its signature
and the call's args and kwargs.
Example:
>>> def foo(a, b, c=3, d=4):
... pass
...
>>> # what would be the value of "c" whe... | python | {
"resource": ""
} |
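
The docstring of `get_param_value_from_func_call` poses the question "what would be the value of c for this call"; the standard-library way to answer it is sketched below (whether the original relies on `inspect.signature` or an older API is not visible in the row):

```python
import inspect

def get_param_value_sketch(param_name, func, call_args, call_kwargs):
    # Bind the call's args/kwargs against the function signature, fill in
    # defaults, then look the parameter up by name.
    bound = inspect.signature(func).bind(*call_args, **call_kwargs)
    bound.apply_defaults()
    return bound.arguments[param_name]

def foo(a, b, c=3, d=4):
    pass

print(get_param_value_sketch("c", foo, (1, 2), {"d": 40}))  # 3
print(get_param_value_sketch("c", foo, (1, 2, 30), {}))     # 30
```
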
q271928 | clean_cachedir_old_entries | test | def clean_cachedir_old_entries(cachedir: StoreBackendBase, func_name: str, limit: int) -> int:
"""Remove old entries from the cache"""
if limit < 1:
raise ValueError("'limit' must be greater or equal to 1")
cache_entries = get_cachedir_entries(cachedir, func_name)
cache_entries = sorted(cache_e... | python | {
"resource": ""
} |
q271929 | roll_up | test | def roll_up(
df,
levels: List[str],
groupby_vars: List[str],
extra_groupby_cols: List[str] = None,
var_name: str = 'type',
value_name: str = 'value',
agg_func: str = 'sum',
drop_levels: List[str] = None
):
"""
Creates aggregates following a given h... | python | {
"resource": ""
} |
q271930 | argmax | test | def argmax(df, column: str, groups: Union[str, List[str]] = None):
"""
Keep the row of the data corresponding to the maximal value in a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column containing the value you want to keep the maximum
*optional :*
- `gro... | python | {
"resource": ""
} |
q271931 | argmin | test | def argmin(df, column: str, groups: Union[str, List[str]] = None):
"""
Keep the row of the data corresponding to the minimal value in a column
---
### Parameters
*mandatory :*
- `column` (str): name of the column containing the value you want to keep the minimum
*optional :*
- `group... | python | {
"resource": ""
} |
q271932 | fillna | test | def fillna(df, column: str, value=None, column_value=None):
"""
Can fill NaN values from a column with a given value or a column
---
### Parameters
- `column` (*str*): name of column you want to fill
- `value`: NaN will be replaced by this value
- `column_value`: NaN will be replaced by ... | python | {
"resource": ""
} |
q271933 | add_offset | test | def add_offset(dateobj, hr_offset: str, sign: str):
"""add a human readable offset to `dateobj` and return corresponding date.
rely on `pandas.Timedelta` and add the following extra shortcuts:
- "w", "week" and "weeks" for a week (i.e. 7days)
- "month', "months" for a month (i.e. no day computation, ju... | python | {
"resource": ""
} |
q271934 | add_months | test | def add_months(dateobj, nb_months: int):
"""return `dateobj` + `nb_months`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_months(date(2018, 1, 1), 1)
    datetime.date(2018, 2, 1)
>>> add_months(date(2018, 1, 1), -1)
datetime.date(2017, 1... | python | {
"resource": ""
} |
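
The `add_months` doctests fully specify the landing-date behaviour (overflowing days clamp to the last day of the target month); the sketch below is one way to implement that contract, not necessarily the library's actual code:

```python
import calendar
from datetime import date

def add_months_sketch(dateobj, nb_months: int):
    # Shift year/month arithmetically, then clamp the day to the last
    # valid day of the landing month (e.g. Jan 31 + 1 month -> Feb 28/29).
    month_index = dateobj.month - 1 + nb_months
    year = dateobj.year + month_index // 12
    month = month_index % 12 + 1
    day = min(dateobj.day, calendar.monthrange(year, month)[1])
    return date(year, month, day)

print(add_months_sketch(date(2018, 1, 1), 1))    # 2018-02-01
print(add_months_sketch(date(2018, 1, 31), 1))   # 2018-02-28
print(add_months_sketch(date(2018, 1, 1), -1))   # 2017-12-01
```
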
q271935 | add_years | test | def add_years(dateobj, nb_years):
"""return `dateobj` + `nb_years`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_years(date(2018, 1, 1), 1)
datetime.date(2019, 1, 1)
>>> add_years(date(2018, 1, 1), -1)
datetime.date(2017, 1, 1)
>... | python | {
"resource": ""
} |
q271936 | parse_date | test | def parse_date(datestr: str, date_fmt: str) -> date:
"""parse `datestr` and return corresponding date object.
`datestr` should be a string matching `date_fmt` and parseable by `strptime`
but some offset can also be added using `(datestr) + OFFSET` or `(datestr) -
OFFSET` syntax. When using this syntax,... | python | {
"resource": ""
} |
q271937 | filter_by_date | test | def filter_by_date(
df,
date_col: str,
date_format: str = '%Y-%m-%d',
start: str = None,
stop: str = None,
atdate: str = None
):
"""
    Filter your dataframe's data by date.
This function will interpret `start`, `stop` and `atdate` and build
the corresponding date range. The caller m... | python | {
"resource": ""
} |
q271938 | percentage | test | def percentage(
df,
column: str,
group_cols: Union[str, List[str]] = None,
new_column: str = None
):
"""
Add a column to the dataframe according to the groupby logic on group_cols
---
### Parameters
*mandatory :*
- `column` (*str*): name of the desired column y... | python | {
"resource": ""
} |
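
For the `percentage` row the exact semantics are cut off; a plausible reading (each value as a share of its group's total) is sketched with a groupby transform, with the `new_column` default being an assumption:

```python
import pandas as pd

def percentage_sketch(df, column, group_cols=None, new_column=None):
    # Hypothetical sketch: each value expressed as a percentage of the
    # column total, computed per group when group_cols is given.
    new_column = new_column or f"{column}_pct"
    if group_cols is None:
        total = df[column].sum()
    else:
        total = df.groupby(group_cols)[column].transform("sum")
    df[new_column] = 100 * df[column] / total
    return df

df = pd.DataFrame({"shop": ["a", "a", "b"], "sales": [1, 3, 5]})
print(percentage_sketch(df, "sales", group_cols="shop"))
# shop 'a' rows become 25.0 and 75.0; the shop 'b' row becomes 100.0
```
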
q271939 | ada_family_core | test | def ada_family_core(params, gparams, learning_rate = 0.01, eps= 1e-6, rho=0.95, method="ADADELTA",
beta=0.0, gsum_regularization = 0.0001):
"""
Optimize by SGD, AdaGrad, or AdaDelta.
"""
_, _, _, args = inspect.getargvalues(inspect.currentframe())
logging.info("ada_family_co... | python | {
"resource": ""
} |
q271940 | GeneralNeuralTrainer._learning_updates | test | def _learning_updates(self):
"""
Return updates in the training.
"""
params = self.training_params()
gradients = self.get_gradients(params)
return self.optimization_updates(params, gradients) | python | {
"resource": ""
} |
q271941 | GeneralNeuralTrainer.training_params | test | def training_params(self):
"""
Get parameters to be optimized.
"""
params = self.network.parameters
# Freeze parameters
if self.config.fixed_parameters:
logging.info("fixed parameters: %s" % ", ".join(map(str, self.config.fixed_parameters)))
params... | python | {
"resource": ""
} |
q271942 | GeneralNeuralTrainer.optimization_updates | test | def optimization_updates(self, params, gradients):
"""
Return updates from optimization.
"""
updates, free_parameters = optimize_updates(params, gradients, self.config)
self.network.free_parameters.extend(free_parameters)
logging.info("Added %d free parameters for optimiz... | python | {
"resource": ""
} |
q271943 | FirstGlimpseLayer._first_glimpse_sensor | test | def _first_glimpse_sensor(self, x_t):
"""
Compute first glimpse position using down-sampled image.
"""
downsampled_img = theano.tensor.signal.downsample.max_pool_2d(x_t, (4,4))
downsampled_img = downsampled_img.flatten()
first_l = T.dot(downsampled_img, self.W_f)
... | python | {
"resource": ""
} |
q271944 | MyJointTrainingModel.prepare | test | def prepare(self):
"""
        All code that creates parameters should be put into the 'setup' function.
"""
self.output_dim = 10
self.encoder = Chain(self.input_dim).stack(Dense(self.internal_layer_size, 'tanh'))
self.decoder = Chain(self.internal_layer_size).stack(Dense(self.input_... | python | {
"resource": ""
} |
q271945 | MyJointTrainingModel.compute_tensor | test | def compute_tensor(self, x):
"""
Build the computation graph here.
"""
internal_variable = self.encoder.compute_tensor(x)
decoding_output = self.decoder.compute_tensor(internal_variable)
classification_output = self.classifier.compute_tensor(internal_variable)
... | python | {
"resource": ""
} |
q271946 | BasicDataset.map | test | def map(self, func):
"""
Process all data with given function.
        The scheme of the function should be x,y -> x,y.
"""
if self._train_set:
self._train_set = map(func, self._train_set)
if self._valid_set:
self._valid_set = map(func, self._valid_set)
... | python | {
"resource": ""
} |
q271947 | BasicDataset.vectorize_target | test | def vectorize_target(self, size):
"""
Make targets be one-hot vectors.
"""
if self._train_set:
self._train_set = self._vectorize_set(self._train_set, size)
if self._valid_set:
self._valid_set = self._vectorize_set(self._valid_set, size)
if self._te... | python | {
"resource": ""
} |
q271948 | BasicDataset.report | test | def report(self):
"""
Print dataset statistics.
"""
logging.info("%s train=%d valid=%d test=%d" % (self.__class__.__name__,
len(list(self._train_set)) if self._train_set else 0,
... | python | {
"resource": ""
} |
q271949 | CustomizeTrainer.train | test | def train(self, train_set, valid_set=None, test_set=None, train_size=None):
'''We train over mini-batches and evaluate periodically.'''
iteration = 0
while True:
if not iteration % self.config.test_frequency and test_set:
try:
self.test(iteration, ... | python | {
"resource": ""
} |
q271950 | NeuralLM.sample | test | def sample(self, input, steps):
"""
Sample outputs from LM.
"""
inputs = [[onehot(self.input_dim, x) for x in input]]
for _ in range(steps):
target = self.compute(inputs)[0,-1].argmax()
input.append(target)
inputs[0].append(onehot(self.input_di... | python | {
"resource": ""
} |
q271951 | Attention.compute_alignments | test | def compute_alignments(self, prev_state, precomputed_values, mask=None):
"""
Compute the alignment weights based on the previous state.
"""
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
# For test time the UaH will be (time, output_dim)
if UaH.ndim =... | python | {
"resource": ""
} |
q271952 | Attention.compute_context_vector | test | def compute_context_vector(self, prev_state, inputs, precomputed_values=None, mask=None):
"""
Compute the context vector with soft attention.
"""
precomputed_values = precomputed_values if precomputed_values else self.precompute(inputs)
align_weights = self.compute_alignments(pre... | python | {
"resource": ""
} |
q271953 | concatenate | test | def concatenate(vars, axis=-1):
"""
A utility function of concatenate.
"""
from deepy.core.neural_var import NeuralVariable
if isinstance(vars[0], NeuralVariable):
concat_var = Concatenate(axis=axis).compute(*vars)
if axis == -1 or axis == vars[0].tensor.ndim - 1:
concat_... | python | {
"resource": ""
} |
q271954 | SequentialDataset._pad | test | def _pad(self, side, length):
"""
        Pad sequences to a given length on the left or right side.
"""
if self._train_set:
self._train_set = pad_dataset(self._train_set, side, length)
if self._valid_set:
self._valid_set = pad_dataset(self._valid_set, side, length)... | python | {
"resource": ""
} |
q271955 | rmsprop_core | test | def rmsprop_core(params, gradients, momentum=0.9, learning_rate=0.01):
"""
RMSPROP optimization core.
"""
for param, grad in zip(params, gradients):
rms_ = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_rms')
rms = momentum * rms_ + (1 - momentum) * grad * gr... | python | {
"resource": ""
} |
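
The visible part of `rmsprop_core` shows the squared-gradient moving average; the same update rule is rewritten below in plain NumPy so it can be run without Theano (the epsilon and the exact parameter-update line are assumptions, since the snippet stops mid-loop):

```python
import numpy as np

def rmsprop_step(params, gradients, state, momentum=0.9,
                 learning_rate=0.01, eps=1e-8):
    # Same accumulator as in the snippet: an exponential moving average of
    # the squared gradient, followed by a scaled gradient step.
    for i, (param, grad) in enumerate(zip(params, gradients)):
        state[i] = momentum * state[i] + (1 - momentum) * grad * grad
        params[i] = param - learning_rate * grad / (np.sqrt(state[i]) + eps)
    return params, state

params = [np.array([1.0, -2.0])]
grads = [np.array([0.5, -0.5])]
state = [np.zeros_like(params[0])]
params, state = rmsprop_step(params, grads, state)
print(params[0])
```
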
q271956 | Timer.report | test | def report(self):
"""
Report elapsed time.
"""
if not self.end_time:
self.end()
print ("Time: {} mins".format((self.end_time - self.start_time )/ 60)) | python | {
"resource": ""
} |
q271957 | TrainingValidator.run | test | def run(self, data_x):
"""
Run the model with validation data and return costs.
"""
output_vars = self.compute(*data_x)
return self._extract_costs(output_vars) | python | {
"resource": ""
} |
q271958 | TrainingValidator.invoke | test | def invoke(self):
"""
This function will be called after each iteration.
"""
self._counter += 1
if self._counter % self._freq == 0:
cnt = 0.
sum_map = defaultdict(float)
for x in self._trainer.get_data(self._data_split):
val_map... | python | {
"resource": ""
} |
q271959 | Loop._build_loop_vars | test | def _build_loop_vars(self):
"""
Create inner loop variables.
"""
from theano.tensor.var import TensorVariable
from deepy.core.neural_var import NeuralVariable
if not self._loop_vars:
self._ordered_out_keys = self._outputs.keys()
seq_keys = self._se... | python | {
"resource": ""
} |
q271960 | Loop._scan_step | test | def _scan_step(self, vars):
"""
Internal scan with dummy input variables.
"""
from neural_var import NeuralVariable
if not self._loop_vars:
raise Exception("The loop is not initialized. To initialize the loop, use `with loop as vars`")
replace_map = {}
... | python | {
"resource": ""
} |
q271961 | momentum_core | test | def momentum_core(params, gradients, momentum=0.9, learning_rate=0.01):
"""
Momentum SGD optimization core.
"""
free_parameters = []
updates = []
for param, grad in zip(params, gradients):
delta = learning_rate * grad
velocity = theano.shared(np.zeros_like(param.get_value... | python | {
"resource": ""
} |
q271962 | Runtime.iftrain | test | def iftrain(self, then_branch, else_branch):
"""
Execute `then_branch` when training.
"""
return ifelse(self._training_flag, then_branch, else_branch, name="iftrain") | python | {
"resource": ""
} |
q271963 | NeuralTrainer.skip | test | def skip(self, n_batches, n_epochs=0):
"""
Skip N batches in the training.
"""
logging.info("skip %d epochs and %d batches" % (n_epochs, n_batches))
self._skip_batches = n_batches
self._skip_epochs = n_epochs | python | {
"resource": ""
} |
q271964 | NeuralTrainer.load_params | test | def load_params(self, path, exclude_free_params=False):
"""
Load parameters for the training.
This method can load free parameters and resume the training progress.
"""
self.network.load_params(path, exclude_free_params=exclude_free_params)
self.best_params = self.copy_pa... | python | {
"resource": ""
} |
q271965 | NeuralTrainer.train | test | def train(self, train_set, valid_set=None, test_set=None, train_size=None):
"""
Train the model and return costs.
"""
self._epoch = 0
while True:
if self._skip_epochs > 0:
logging.info("skipping one epoch ...")
self._skip_epochs -= 1
... | python | {
"resource": ""
} |
q271966 | NeuralTrainer._run_train | test | def _run_train(self, epoch, train_set, train_size=None):
"""
Run one training iteration.
"""
self.network.train_logger.record_epoch(epoch + 1)
costs = self.train_step(train_set, train_size)
if not epoch % self.config.monitor_frequency:
self.report(dict(costs),... | python | {
"resource": ""
} |
q271967 | NeuralTrainer._run_valid | test | def _run_valid(self, epoch, valid_set, dry_run=False, save_path=None):
"""
        Run one validation iteration and return True if training should continue.
"""
costs = self.valid_step(valid_set)
# this is the same as: (J_i - J_f) / J_i > min improvement
_, J = costs[0]
new_best = Fa... | python | {
"resource": ""
} |
q271968 | NeuralTrainer.report | test | def report(self, score_map, type="valid", epoch=-1, new_best=False):
"""
Report the scores and record them in the log.
"""
type_str = type
if len(type_str) < 5:
type_str += " " * (5 - len(type_str))
info = " ".join("%s=%.2f" % el for el in score_map.items())
... | python | {
"resource": ""
} |
q271969 | NeuralTrainer.get_data | test | def get_data(self, data_split="train"):
"""
Get specified split of data.
"""
if data_split == 'train':
return self._current_train_set
elif data_split == 'valid':
return self._current_valid_set
elif data_split == 'test':
return self._cur... | python | {
"resource": ""
} |
q271970 | NeuralVariable.apply | test | def apply(self, func, dim=None):
"""
Apply a function to tensors.
"""
output_dim = dim if dim else self.output_dim
return NeuralVariable(func(self.tensor), output_dim) | python | {
"resource": ""
} |
q271971 | GeneralConfig.report | test | def report(self):
"""
Report usage of training parameters.
"""
if self.logger:
self.logger.info("accessed parameters:")
for key in self.used_parameters:
self.logger.info(" - %s %s" % (key, "(undefined)" if key in self.undefined_parameters else "")) | python | {
"resource": ""
} |
q271972 | GraphBuilder.var | test | def var(self, tensor_type, last_dim=0, test_shape=None):
"""
An alias of deepy.tensor.var.
"""
from deepy.tensor import var
return var(tensor_type, last_dim=last_dim, test_shape=test_shape) | python | {
"resource": ""
} |
q271973 | GraphBuilder.create_vars_from_data | test | def create_vars_from_data(self, dataset, split="train"):
"""
Create vars given a dataset and set test values.
Useful when dataset is already defined.
"""
from deepy.core.neural_var import NeuralVariable
vars = []
if split == "valid":
data_split = datas... | python | {
"resource": ""
} |
q271974 | GraphBuilder.shared | test | def shared(self, value, name=None):
"""
Create a shared theano scalar value.
"""
if type(value) == int:
final_value = np.array(value, dtype="int32")
elif type(value) == float:
final_value = np.array(value, dtype=env.FLOATX)
else:
final_... | python | {
"resource": ""
} |
q271975 | AutoEncoder.stack_encoders | test | def stack_encoders(self, *layers):
"""
        Stack encoding layers; this must be done before stacking decoding layers.
"""
self.stack(*layers)
self.encoding_layes.extend(layers) | python | {
"resource": ""
} |
q271976 | AutoEncoder.stack_decoders | test | def stack_decoders(self, *layers):
"""
Stack decoding layers.
"""
self.stack(*layers)
self.decoding_layers.extend(layers) | python | {
"resource": ""
} |
q271977 | AutoEncoder.encode | test | def encode(self, x):
"""
Encode given input.
"""
if not self.encoding_network:
self.encoding_network = NeuralNetwork(self.input_dim, self.input_tensor)
self.encoding_network.input_variables = self.input_variables
for layer in self.encoding_layes:
... | python | {
"resource": ""
} |
q271978 | AutoEncoder.decode | test | def decode(self, x):
"""
Decode given representation.
"""
if not self.rep_dim:
raise Exception("rep_dim must be set to decode.")
if not self.decoding_network:
self.decoding_network = NeuralNetwork(self.rep_dim)
for layer in self.decoding_layers... | python | {
"resource": ""
} |
q271979 | create_2d_gaussian | test | def create_2d_gaussian(dim, sigma):
"""
This function creates a 2d gaussian kernel with the standard deviation
denoted by sigma
:param dim: integer denoting a side (1-d) of gaussian kernel
:param sigma: floating point indicating the standard deviation
:returns: a numpy 2d array
"""
# ... | python | {
"resource": ""
} |
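
The body of `create_2d_gaussian` is truncated right after the docstring; below is a NumPy sketch of a normalised 2-D Gaussian kernel with side `dim` and standard deviation `sigma` (the centring and normalisation conventions are assumptions):

```python
import numpy as np

def create_2d_gaussian_sketch(dim, sigma):
    # Evaluate exp(-(x^2 + y^2) / (2*sigma^2)) on a dim x dim grid centred
    # on the kernel, then normalise so the weights sum to 1.
    ax = np.arange(dim) - (dim - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return kernel / kernel.sum()

kernel = create_2d_gaussian_sketch(5, 1.0)
print(kernel.shape, round(kernel.sum(), 6))  # (5, 5) 1.0
```
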
q271980 | NeuralNetwork.register_layer | test | def register_layer(self, layer):
"""
        Register the layer so that its params will be trained.
But the output of the layer will not be stacked.
"""
if type(layer) == Block:
layer.fix()
self.parameter_count += layer.parameter_count
self.parameters.extend(l... | python | {
"resource": ""
} |
q271981 | NeuralNetwork.monitor_layer_outputs | test | def monitor_layer_outputs(self):
"""
Monitoring the outputs of each layer.
Useful for troubleshooting convergence problems.
"""
for layer, hidden in zip(self.layers, self._hidden_outputs):
self.training_monitors.append(('mean(%s)' % (layer.name), abs(hidden).mean())) | python | {
"resource": ""
} |
q271982 | NeuralNetwork.all_parameters | test | def all_parameters(self):
"""
Return all parameters.
"""
params = []
params.extend(self.parameters)
params.extend(self.free_parameters)
return params | python | {
"resource": ""
} |
q271983 | NeuralNetwork.setup_variables | test | def setup_variables(self):
"""
Set up variables.
"""
if self.input_tensor:
if type(self.input_tensor) == int:
x = dim_to_var(self.input_tensor, name="x")
else:
x = self.input_tensor
else:
x = T.matrix('x')
... | python | {
"resource": ""
} |
q271984 | NeuralNetwork.compute | test | def compute(self, *x):
"""
Return network output.
"""
self._compile()
outs = self._compute(*x)
if self._output_keys:
return MapDict(dict(zip(self._output_keys, outs)))
else:
return outs | python | {
"resource": ""
} |
q271985 | NeuralNetwork.save_params | test | def save_params(self, path, new_thread=False):
"""
Save parameters to file.
"""
save_logger.info(path)
param_variables = self.all_parameters
params = [p.get_value().copy() for p in param_variables]
if new_thread:
thread = Thread(target=save_network_par... | python | {
"resource": ""
} |
q271986 | NeuralNetwork.load_params | test | def load_params(self, path, exclude_free_params=False):
"""
Load parameters from file.
"""
if not os.path.exists(path): return;
logging.info("loading parameters from %s" % path)
# Decide which parameters to load
if exclude_free_params:
params_to_load =... | python | {
"resource": ""
} |
q271987 | NeuralNetwork.report | test | def report(self):
"""
Print network statistics.
"""
logging.info("network inputs: %s", " ".join(map(str, self.input_variables)))
logging.info("network targets: %s", " ".join(map(str, self.target_variables)))
logging.info("network parameters: %s", " ".join(map(str, self.al... | python | {
"resource": ""
} |
q271988 | NeuralLayer.register_parameters | test | def register_parameters(self, *parameters):
"""
Register parameters.
"""
for param in parameters:
self.parameter_count += np.prod(param.get_value().shape)
self.parameters.extend(parameters) | python | {
"resource": ""
} |
q271989 | NeuralLayer.register_updates | test | def register_updates(self, *updates):
"""
Register updates that will be executed in each iteration.
"""
for key, node in updates:
if key not in self._registered_updates:
self.updates.append((key, node))
self._registered_updates.add(key) | python | {
"resource": ""
} |
q271990 | NeuralLayer.register_training_updates | test | def register_training_updates(self, *updates):
"""
Register updates that will only be executed in training phase.
"""
for key, node in updates:
if key not in self._registered_training_updates:
self.training_updates.append((key, node))
self._reg... | python | {
"resource": ""
} |
q271991 | NeuralLayer.register_monitors | test | def register_monitors(self, *monitors):
"""
        Register monitors; they should be tuples of (name, Theano variable).
"""
for key, node in monitors:
if key not in self._registered_monitors:
node *= 1.0 # Avoid CudaNdarray
self.training_monitors.appen... | python | {
"resource": ""
} |
q271992 | multiple_l2_norm | test | def multiple_l2_norm(tensors):
"""
Get the L2 norm of multiple tensors.
This function is taken from blocks.
"""
# Another way for doing this, I don't know which one is fast
# return T.sqrt(sum(T.sum(t ** 2) for t in tensors))
flattened = [T.as_tensor_variable(t).flatten() for t in tensors]
... | python | {
"resource": ""
} |
q271993 | StreamPickler.dump_one | test | def dump_one(elt_to_pickle, file_obj):
"""
dumps one element to file_obj, a file opened in write mode
"""
pickled_elt_str = dumps(elt_to_pickle)
file_obj.write(pickled_elt_str)
# record separator is a blank line
# (since pickled_elt_str might contain its own newli... | python | {
"resource": ""
} |
q271994 | StreamPickler.load | test | def load(file_obj):
"""
load contents from file_obj, returning a generator that yields one
element at a time
"""
cur_elt = []
for line in file_obj:
cur_elt.append(line)
if line == '\n':
pickled_elt_str = ''.join(cur_elt)
cur_el... | python | {
"resource": ""
} |
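
Taken together, the `StreamPickler.dump_one` and `StreamPickler.load` rows describe a simple framing protocol: one pickled element per record, with a blank line as record separator. A self-contained sketch of that protocol follows (using protocol-0 pickles so records stay ASCII and never contain a blank line, which is an assumption about the original's choice):

```python
import io
import pickle

def dump_one(elt, file_obj):
    # Protocol 0 keeps the pickle ASCII-safe; a blank line acts as the
    # record separator, as the original docstring describes.
    file_obj.write(pickle.dumps(elt, protocol=0).decode("ascii"))
    file_obj.write("\n\n")

def load(file_obj):
    # Accumulate lines until the blank separator line, then unpickle.
    current = []
    for line in file_obj:
        current.append(line)
        if line == "\n":
            yield pickle.loads("".join(current).encode("ascii"))
            current = []

buf = io.StringIO()
for item in [{"a": 1}, [1, 2, 3], "hello"]:
    dump_one(item, buf)
buf.seek(0)
print(list(load(buf)))  # [{'a': 1}, [1, 2, 3], 'hello']
```
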
q271995 | Block.load_params | test | def load_params(self, path, exclude_free_params=False):
from deepy.core import graph
"""
Load parameters to the block.
"""
from deepy.core.comp_graph import ComputationalGraph
model = graph.compile(blocks=[self])
model.load_params(path, exclude_free_params=exclude... | python | {
"resource": ""
} |
q271996 | OAuth2.create_request_elements | test | def create_request_elements(
cls, request_type, credentials, url, method='GET', params=None,
headers=None, body='', secret=None, redirect_uri='', scope='',
csrf='', user_state=''
):
"""
Creates |oauth2| request elements.
"""
headers = headers or {... | python | {
"resource": ""
} |
q271997 | OAuth2.decode_state | test | def decode_state(cls, state, param='user_state'):
"""
Decode state and return param.
:param str state:
state parameter passed through by provider
:param str param:
key to query from decoded state variable. Options include 'csrf'
and 'user_state'.
... | python | {
"resource": ""
} |
q271998 | Facebook._x_credentials_parser | test | def _x_credentials_parser(credentials, data):
"""
        We need to override this method to fix Facebook's naming deviation.
"""
# Facebook returns "expires" instead of "expires_in".
credentials.expire_in = data.get('expires')
if data.get('token_type') == 'bearer':
... | python | {
"resource": ""
} |
q271999 | Google._x_request_elements_filter | test | def _x_request_elements_filter(cls, request_type, request_elements,
credentials):
"""
        Google doesn't accept the client ID and secret being present both in the
        request parameters and in the basic authorization header of the access
        token request.
"""
... | python | {
"resource": ""
} |