text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def boxify_points(geom, rast):
    """Convert Point/MultiPoint geometries into small box polygons.

    Point and MultiPoint don't play well with GDALRasterize, so each
    point is replaced by a box covering 99% of the raster cell it falls
    in, centered on that cell.
    """
    if 'Point' not in geom.type:
        raise ValueError("Points or multipoints only")
    if geom.type == 'Point':
        points = [geom]
    elif geom.type == "MultiPoint":
        points = geom.geoms
    # shrink each cell box by 1% of the cell size on every side
    shrink = -0.01 * abs(min(rast.affine.a, rast.affine.e))
    cell_boxes = []
    for point in points:
        row, col = rast.index(point.x, point.y)
        cell_window = ((row, row + 1), (col, col + 1))
        cell_boxes.append(
            box(*window_bounds(cell_window, rast.affine)).buffer(shrink))
    return MultiPolygon(cell_boxes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_feature(obj):
    """Given a python object, attempt to produce a GeoJSON-like Feature.

    Tries, in order: objects exposing ``__geo_interface__``, WKT strings,
    WKB bytes/strings, and GeoJSON-like python mappings.

    Raises
    ------
    ValueError
        If ``obj`` cannot be interpreted as a Feature or geometry.
    """
    # object implementing geo_interface
    if hasattr(obj, '__geo_interface__'):
        gi = obj.__geo_interface__
        if gi['type'] in geom_types:
            return wrap_geom(gi)
        elif gi['type'] == 'Feature':
            return gi

    # wkt
    try:
        shape = wkt.loads(obj)
        return wrap_geom(shape.__geo_interface__)
    except (ReadingError, TypeError, AttributeError):
        pass

    # wkb
    try:
        shape = wkb.loads(obj)
        return wrap_geom(shape.__geo_interface__)
    except (ReadingError, TypeError):
        pass

    # geojson-like python mapping
    try:
        if obj['type'] in geom_types:
            return wrap_geom(obj)
        elif obj['type'] == 'Feature':
            return obj
    # fix: KeyError added -- a mapping without a 'type' key previously
    # leaked the KeyError instead of falling through to the ValueError
    except (AssertionError, TypeError, KeyError):
        pass

    raise ValueError("Can't parse %s as a geojson Feature object" % obj)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bounds_window(bounds, affine):
    """Create a rasterio-style window fully covering ``bounds``.

    ``bounds`` is (w, s, e, n); the stop row/col are rounded up
    (math.ceil) so the window covers the bounds completely.
    """
    west, south, east, north = bounds
    row_start, col_start = rowcol(west, north, affine)
    row_stop, col_stop = rowcol(east, south, affine, op=math.ceil)
    return (row_start, row_stop), (col_start, col_stop)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, bounds=None, window=None, masked=False):
    """Perform a boundless read against the underlying array source.

    Parameters
    ----------
    bounds : iterable, optional
        Bounding box in (w, s, e, n) order.
    window : tuple, optional
        Rasterio-style window.
        Exactly one of ``bounds`` or ``window`` must be specified;
        providing both or neither raises ValueError.
    masked : bool (default=False)
        Return a masked numpy array.

    Returns
    -------
    Raster object with updated affine and array info.
    """
    # Calculate the window
    if bounds and window:
        raise ValueError("Specify either bounds or window")
    if bounds:
        win = bounds_window(bounds, self.affine)
    elif window:
        win = window
    else:
        raise ValueError("Specify either bounds or window")

    c, _, _, f = window_bounds(win, self.affine)  # c ~ west, f ~ north
    a, b, _, d, e, _, _, _, _ = tuple(self.affine)
    new_affine = Affine(a, b, c, d, e, f)

    nodata = self.nodata
    if nodata is None:
        nodata = -999
        warnings.warn("Setting nodata to -999; specify nodata explicitly")

    if self.array is not None:
        # It's an ndarray already
        new_array = boundless_array(
            self.array, window=win, nodata=nodata, masked=masked)
    elif self.src:
        # It's an open rasterio dataset
        new_array = self.src.read(
            self.band, window=win, boundless=True, masked=masked)
    else:
        # fix: previously fell through to an UnboundLocalError on new_array
        raise ValueError("Raster source has neither an array nor an open dataset")
    return Raster(new_array, new_affine, nodata)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chunks(data, n):
    """Yield successive n-sized chunks from a slice-able iterable."""
    start = 0
    while start < len(data):
        yield data[start:start + n]
        start += n
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def halt(self):
    """Switch the card state to HALT, ending communication with the tag."""
    buf = []
    buf.append(self.act_end)  # HALT command code
    buf.append(0)
    # NOTE(review): the CRC is computed here but never appended to buf
    # before the transceive below, unlike the other commands which append
    # crc[0]/crc[1] -- confirm whether the HALT frame should omit the CRC.
    crc = self.calculate_crc(buf)
    self.clear_bitmask(0x08, 0x80)
    self.card_write(self.mode_transrec, buf)
    self.clear_bitmask(0x08, 0x08)
    # Halting the tag invalidates the current authentication state.
    self.authed = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, block_address, data):
    """Write 16 bytes of ``data`` to the block at ``block_address``.

    You should be authenticated before calling write.

    Returns
    -------
    error : bool
        True if either stage of the two-step write handshake failed.
    """
    # Step 1: send the WRITE command plus the target block, with CRC.
    buf = []
    buf.append(self.act_write)
    buf.append(block_address)
    crc = self.calculate_crc(buf)
    buf.append(crc[0])
    buf.append(crc[1])
    (error, back_data, back_length) = self.card_write(self.mode_transrec, buf)
    # A valid ACK is a 4-bit response whose low nibble equals 0x0A.
    if not(back_length == 4) or not((back_data[0] & 0x0F) == 0x0A):
        error = True
    if not error:
        # Step 2: send the 16 data bytes followed by their CRC.
        buf_w = []
        for i in range(16):
            buf_w.append(data[i])
        crc = self.calculate_crc(buf_w)
        buf_w.append(crc[0])
        buf_w.append(crc[1])
        (error, back_data, back_length) = self.card_write(self.mode_transrec, buf_w)
        # Again expect the 4-bit 0x0A ACK after the data frame.
        if not(back_length == 4) or not((back_data[0] & 0x0F) == 0x0A):
            error = True
    return error
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth(self, auth_method, key):
    """Store the authentication method and key to use for the current tag."""
    self.method = auth_method
    self.key = key
    if self.debug:
        method_name = "A" if auth_method == self.rfid.auth_a else "B"
        print("Changing used auth key to " + str(key) + " using method " + method_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_trailer(self, sector, key_a=(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF), auth_bits=(0xFF, 0x07, 0x80), user_data=0x69, key_b=(0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF)):
    """Write the sector trailer (block 3) of the given sector.

    Tag and auth must be set -- performs auth. Bytes that are None keep
    their current value (see ``rewrite``). Returns the error state.
    """
    # Trailer layout: key A (6 bytes), access bits (3), user byte (1), key B (6).
    trailer = key_a[:6] + auth_bits[:3] + (user_data, ) + key_b[:6]
    return self.rewrite(self.block_addr(sector, 3), trailer)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rewrite(self, block_address, new_bytes):
    """Rewrite a block, keeping the old byte wherever ``new_bytes`` is None.

    Tag and auth must be set -- performs auth.

    Returns
    -------
    error : bool
        True on failure (including when tag/auth are not set).
    """
    if not self.is_tag_set_auth():
        return True
    error = self.do_auth(block_address)
    if not error:
        # Read the current contents so unchanged bytes survive the write.
        (error, data) = self.rfid.read(block_address)
    if not error:
        for i in range(len(new_bytes)):
            # None means "keep the existing byte at this position".
            if new_bytes[i] != None:
                if self.debug:
                    print("Changing pos " + str(i) + " with current value " + str(data[i]) + " to " + str(new_bytes[i]))
                data[i] = new_bytes[i]
        error = self.rfid.write(block_address, data)
        if self.debug:
            print("Writing " + str(data) + " to " + self.sector_string(block_address))
    return error
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_access_bits(self, c1, c2, c3):
    """Calculate the access bytes for a sector trailer.

    ``c1``, ``c2``, ``c3`` are 4-item tuples holding the access
    condition bit for each block of the sector.

    Returns
    -------
    tuple of int
        The three access bytes (bytes 6, 7, 8) of the sector trailer.
    """
    def nibble(bits, invert=False):
        # Pack bits[0]..bits[3] into bit positions 0..3, optionally inverted.
        value = 0
        for pos, bit in enumerate(bits):
            value |= ((~bit if invert else bit) & 1) << pos
        return value

    byte_6 = (nibble(c2, invert=True) << 4) | nibble(c1, invert=True)
    byte_7 = (nibble(c1) << 4) | nibble(c3, invert=True)
    byte_8 = (nibble(c3) << 4) | nibble(c2)
    return byte_6, byte_7, byte_8
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data(n_samples=100):
    """Generate a synthetic classification dataset with ``n_samples`` rows."""
    features, labels = make_classification(
        n_samples=n_samples,
        n_features=N_FEATURES,
        n_classes=N_CLASSES,
        random_state=0,
    )
    # the net expects float32 inputs
    return features.astype(np.float32), labels
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_model(with_pipeline=False):
    """Return an MLP classifier net, optionally inside a scaling pipeline."""
    net = NeuralNetClassifier(MLPClassifier)
    if not with_pipeline:
        return net
    return Pipeline([
        ('scale', FeatureUnion([
            ('minmax', MinMaxScaler()),
            ('normalize', Normalizer()),
        ])),
        ('select', SelectKBest(k=N_FEATURES)),  # keep input size constant
        ('net', net),
    ])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_model(model, output_file):
    """Pickle ``model`` to ``output_file``; no-op when output_file is falsy."""
    if output_file:
        with open(output_file, 'wb') as f:
            pickle.dump(model, f)
        print("Saved model to file '{}'.".format(output_file))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def net(n_samples=100, output_file=None, **kwargs):
    """Train an MLP classifier on synthetic data.

    Parameters
    ----------
    n_samples : int (default=100)
        Number of training samples.
    output_file : str (default=None)
        If not None, file name used to save the model.
    kwargs : dict
        Additional model parameters.
    """
    # important: wrap the model with the parsed command line arguments
    apply_cli_args = parse_args(kwargs, defaults=DEFAULTS_NET)
    model = apply_cli_args(get_model(with_pipeline=False))
    X, y = get_data(n_samples=n_samples)
    print("Training MLP classifier")
    model.fit(X, y)
    save_model(model, output_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pipeline(n_samples=100, output_file=None, **kwargs):
    """Train an MLP classifier in a pipeline on synthetic data.

    The pipeline scales the input data before passing it to the net.
    Note: This docstring is used to create the help for the CLI.

    Parameters
    ----------
    n_samples : int (default=100)
        Number of training samples.
    output_file : str (default=None)
        If not None, file name used to save the model.
    kwargs : dict
        Additional model parameters.
    """
    # important: wrap the model with the parsed command line arguments
    apply_cli_args = parse_args(kwargs, defaults=DEFAULTS_PIPE)
    model = apply_cli_args(get_model(with_pipeline=True))
    X, y = get_data(n_samples=n_samples)
    print("Training MLP classifier in a pipeline")
    model.fit(X, y)
    save_model(model, output_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict(self, X):
    """Return predicted class labels for samples in X.

    If the module's forward method returns multiple outputs as a tuple,
    only the first output is used; consider
    :func:`~skorch.NeuralNet.forward` if all values are relevant.

    Returns
    -------
    y_pred : numpy ndarray
    """
    batches = []
    for out in self.forward_iter(X, training=False):
        logits = out[0] if isinstance(out, tuple) else out
        # argmax over the last axis via max(-1): (values, indices)[-1]
        batches.append(to_numpy(logits.max(-1)[-1]))
    return np.concatenate(batches, 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit(self, X, y, **fit_params):
    """See ``NeuralNet.fit``.

    In contrast to ``NeuralNet.fit``, ``y`` is non-optional to avoid
    mistakenly forgetting about ``y``. However, ``y`` can be set to
    ``None`` in case it is derived dynamically from ``X``.
    """
    # pylint: disable=useless-super-delegation
    # this is actually a pylint bug:
    # https://github.com/PyCQA/pylint/issues/1085
    return super().fit(X, y, **fit_params)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict_proba(self, X):
    """Return probability estimates for the samples in X.

    If the module's forward method returns multiple outputs as a tuple,
    only the first output is used. When the criterion is
    ``BCEWithLogitsLoss``, a sigmoid is applied to turn logits into
    probabilities.

    Returns
    -------
    y_proba : numpy ndarray
    """
    use_sigmoid = isinstance(self.criterion_, torch.nn.BCEWithLogitsLoss)
    batches = []
    for out in self.forward_iter(X, training=False):
        probas = out[0] if isinstance(out, tuple) else out
        if use_sigmoid:
            probas = torch.sigmoid(probas)
        batches.append(to_numpy(probas))
    return np.concatenate(batches, 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _apply_to_data(data, func, unpack_dict=False):
"""Apply a function to data, trying to unpack different data types. """ |
apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)
if isinstance(data, dict):
if unpack_dict:
return [apply_(v) for v in data.values()]
return {k: apply_(v) for k, v in data.items()}
if isinstance(data, (list, tuple)):
try:
# e.g.list/tuple of arrays
return [apply_(x) for x in data]
except TypeError:
return func(data)
return func(data) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uses_placeholder_y(ds):
    """Return True if ``ds`` is a ``skorch.dataset.Dataset`` (possibly
    nested inside one or more ``torch.utils.data.Subset``) that uses y
    as a placeholder (i.e. ``ds.y is None``)."""
    # unwrap nested Subsets iteratively instead of recursing
    while isinstance(ds, torch.utils.data.Subset):
        ds = ds.dataset
    return isinstance(ds, Dataset) and hasattr(ds, "y") and ds.y is None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unpack_data(data):
    """Unpack data returned by the net's iterator into an (X, y) 2-tuple.

    Raises a helpful error message when the wrong number of items is
    returned.
    """
    # Note: This function cannot detect it when a user only returns 1
    # item that is exactly of length 2 (e.g. because the batch size is
    # 2). In that case, the item will be erroneously split into X and y.
    try:
        X, y = data
    except ValueError:
        # a 1-tuple/list or something else entirely, like a torch tensor
        if not isinstance(data, (tuple, list)) or len(data) < 2:
            raise ValueError(ERROR_MSG_1_ITEM)
        raise ValueError(ERROR_MSG_MORE_THAN_2_ITEMS.format(len(data)))
    return X, y
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X, y):
    # pylint: disable=anomalous-backslash-in-string
    """Additional transformations on ``X`` and ``y`` before they are
    handed to the DataLoader.

    Note: when used with a pytorch ``DataLoader``, this is called per
    row, so the incoming ``X`` and ``y`` are single rows.
    """
    # DataLoader cannot handle None, so use a single-element tensor as a
    # placeholder; one value per row suffices since __getitem__ is
    # called for each row of the batch anyway.
    if y is None:
        y = torch.Tensor([0])
    # pytorch cannot convert sparse matrices -- densify; squeeze because
    # X[i].shape is (1, n) for csr matrices
    if sparse.issparse(X):
        X = X.toarray().squeeze(0)
    return X, y
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_cv(self, y):
    """Resolve which cross validation strategy is used."""
    y_arr = None
    if self.stratified:
        # Try to convert y to numpy for sklearn's check_cv; fall back to
        # the raw y if conversion doesn't work.
        try:
            y_arr = to_numpy(y)
        except (AttributeError, TypeError):
            y_arr = y
    if not self._is_float(self.cv):
        return self._check_cv_non_float(y_arr)
    return self._check_cv_float()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_span(s, pattern):
"""Return the span of the first group that matches the pattern.""" |
i, j = -1, -1
match = pattern.match(s)
if not match:
return i, j
for group_name in pattern.groupindex:
i, j = match.span(group_name)
if (i, j) != (-1, -1):
return i, j
return i, j |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _substitute_default(s, new_value):
"""Replaces the default value in a parameter docstring by a new value. The docstring must conform to the numpydoc style and have the form "something (keyname=<value-to-replace>)" If no matching pattern is found or ``new_value`` is None, return the input untouched. Examples -------- 'int (default=256)' 'nonlin (default = Hardtanh(min_val=1, max_val=2))' """ |
if new_value is None:
return s
# BB: ideally, I would like to replace the 'default*' group
# directly but I haven't found a way to do this
i, j = _get_span(s, pattern=P_DEFAULTS)
if (i, j) == (-1, -1):
return s
return '{}{}{}'.format(s[:i], new_value, s[j:]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _resolve_dotted_name(dotted_name):
"""Returns objects from strings Deals e.g. with 'torch.nn.Softmax(dim=-1)'. Modified from palladium: https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py with added support for instantiated objects. """ |
if not isinstance(dotted_name, str):
return dotted_name
if '.' not in dotted_name:
return dotted_name
args = None
params = None
match = P_PARAMS.match(dotted_name)
if match:
dotted_name = match.group('name')
params = match.group('params')
module, name = dotted_name.rsplit('.', 1)
attr = import_module(module)
attr = getattr(attr, name)
if params:
args, kwargs = _parse_args_kwargs(params[1:-1])
attr = attr(*args, **kwargs)
return attr |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_net_kwargs(kwargs):
    """Parse arguments for the estimator, resolving dotted names and
    instantiated classes (e.g. {'module__nonlin': 'torch.nn.Hardtanh(min_val=-2)'})."""
    if not kwargs:
        return kwargs
    return {key: _resolve_dotted_name(value) for key, value in kwargs.items()}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _yield_estimators(model):
    """Yield (prefix, estimator) pairs from the model.

    Pipeline preprocessing steps come first (if any), then the neural
    net, and finally the net's module.
    """
    yield from _yield_preproc_steps(model)
    if isinstance(model, Pipeline):
        step_name, net = model.steps[-1]
        net_prefix = step_name
        module_prefix = step_name + '__module'
    else:
        net = model
        net_prefix = ''
        module_prefix = 'module'
    yield net_prefix, net
    yield module_prefix, net.module
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_help_for_estimator(prefix, estimator, defaults=None):
    """Yield help lines for the given estimator and prefix."""
    # imported lazily so numpydoc stays an optional dependency
    from numpydoc.docscrape import ClassDoc
    defaults = defaults or {}
    estimator = _extract_estimator_cls(estimator)
    yield "<{}> options:".format(estimator.__name__)
    yield from _get_help_for_params(
        ClassDoc(estimator)['Parameters'],
        prefix=prefix,
        defaults=defaults,
    )
    yield ''  # blank separator line
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_help(model, defaults=None):
    """Print help for the command line arguments of the given model.

    Parameters
    ----------
    model : sklearn.base.BaseEstimator
        The basic model, e.g. a ``NeuralNet`` or sklearn ``Pipeline``.
    defaults : dict or None (default=None)
        Optionally, change the default values to use custom defaults.
        Commandline arguments have precedence over defaults.
    """
    defaults = defaults or {}
    print("This is the help for the model-specific parameters.")
    print("To invoke help for the remaining options, run:")
    print("python {} -- --help".format(sys.argv[0]))
    print()
    help_lines = []
    for prefix, estimator in _yield_estimators(model):
        help_lines.extend(
            _get_help_for_estimator(prefix, estimator, defaults=defaults))
    print('\n'.join(help_lines))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_args(kwargs, defaults=None):
    """Apply command line arguments or show help.

    Use this in conjunction with the fire library to quickly build
    command line interfaces for your scripts. Returns a function that
    must be called with the estimator (e.g. ``NeuralNet``) to apply the
    parsed command line arguments. If the --help option is found, the
    returned function prints the estimator-specific help and exits
    instead.

    Parameters
    ----------
    kwargs : dict
        The arguments as parsed by fire.
    defaults : dict or None (default=None)
        Optionally, change the default values to use custom defaults.
        Commandline arguments have precedence over defaults.

    Returns
    -------
    callable
        Either prints help and exits, or applies the arguments to the
        estimator and returns it.
    """
    try:
        import fire  # noqa: F401
    except ImportError:
        raise ImportError("Using skorch cli helpers requires the fire library,"
                          " you can install it with pip: pip install fire.")
    try:
        import numpydoc.docscrape  # noqa: F401
    except ImportError:
        raise ImportError("Using skorch cli helpers requires the numpydoc library,"
                          " you can install it with pip: pip install numpydoc.")
    defaults = defaults or {}

    def print_help_and_exit(estimator):
        print_help(estimator, defaults=defaults)
        sys.exit()

    def set_params(estimator):
        # defaults first, so command line arguments take precedence
        estimator.set_params(**defaults)
        return estimator.set_params(**parse_net_kwargs(kwargs))

    return print_help_and_exit if kwargs.get('help') else set_params
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_requires_grad(pgroups):
    """Yield copies of the parameter groups with parameters that don't
    require a gradient filtered out.

    Deprecated: use ``skorch.callbacks.Freezer`` instead.

    Parameters
    ----------
    pgroups : dict
        Parameter groups to be filtered.
    """
    warnings.warn(
        "For filtering gradients, please use skorch.callbacks.Freezer.",
        DeprecationWarning)
    for pgroup in pgroups:
        filtered = {key: val for key, val in pgroup.items() if key != 'params'}
        filtered['params'] = (
            param for param in pgroup['params'] if param.requires_grad)
        yield filtered
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_cell_to_img(t, padding=16):
    """Convert a normalized pytorch tensor into a Pillow Image, cropping
    ``padding`` pixels from every border."""
    # undo the channel-wise normalization: x * std + mean
    std = torch.Tensor([0.229, 0.224, 0.225]).reshape(-1, 1, 1)
    mu = torch.Tensor([0.485, 0.456, 0.406]).reshape(-1, 1, 1)
    denormalized = t.mul(std)
    denormalized.add_(mu)
    img = to_pil_image(denormalized)
    width, height = img.size
    return img.crop((padding, padding, width - padding, height - padding))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_mask_cells(mask_cells, padding=16):
    """Plot cells with their true mask and predicted mask.

    Parameters
    ----------
    mask_cells : list of tuples (true_mask, predicted_mask, cell)
    padding : int (default=16)
        Padding around mask to remove.

    Returns
    -------
    fig, axes
        The created figure and the full 2-D array of axes.
    """
    fig, axes = plt.subplots(len(mask_cells), 3, figsize=(12, 10))
    # fix: the loop previously rebound ``axes`` to the current row, so the
    # function returned only the last row's axes instead of the full array
    for idx, (row_axes, mask_cell) in enumerate(zip(axes, mask_cells), 1):
        ax1, ax2, ax3 = row_axes
        true_mask, predicted_mask, cell = mask_cell
        plot_mask_cell(
            true_mask, predicted_mask, cell,
            'Type {}'.format(idx),
            ax1, ax2, ax3,
            padding=padding)
    fig.tight_layout()
    return fig, axes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_mask_cell(true_mask, predicted_mask, cell, suffix, ax1, ax2, ax3, padding=16):
    """Plot a single cell alongside its true mask and predicted mask."""
    for ax in (ax1, ax2, ax3):
        ax.grid(False)
        ax.set_xticks([])
        ax.set_yticks([])
    crop = slice(padding, -padding)
    ax1.imshow(true_mask[crop, crop], cmap='viridis')
    ax1.set_title('True Mask - {}'.format(suffix))
    ax2.imshow(predicted_mask[crop, crop], cmap='viridis')
    ax2.set_title('Predicted Mask - {}'.format(suffix))
    ax3.imshow(convert_cell_to_img(cell, padding=padding))
    ax3.set_title('Image - {}'.format(suffix))
    return ax1, ax2, ax3
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_masks(mask_1, mask_2, mask_3):
    """Plot three masks side by side."""
    fig, axes = plt.subplots(1, 3, figsize=(12, 5))
    titles = ("Type 1", "Type 2", "Type 3")
    for ax, mask, title in zip(axes, (mask_1, mask_2, mask_3), titles):
        ax.grid(False)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title(title)
        ax.imshow(mask, cmap='viridis')
    return tuple(axes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_cells(cell_1, cell_2, cell_3):
    """Plot three cell images side by side."""
    fig, axes = plt.subplots(1, 3, figsize=(12, 5))
    titles = ("Type 1", "Type 2", "Type 3")
    for ax, cell, title in zip(axes, (cell_1, cell_2, cell_3), titles):
        ax.grid(False)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title(title)
        ax.imshow(cell)
    return tuple(axes)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sorted_keys(self, keys):
"""Sort keys, dropping the ones that should be ignored. The keys that are in ``self.ignored_keys`` or that end on '_best' are dropped. Among the remaining keys: * 'epoch' is put first; * 'dur' is put last; * keys that start with 'event_' are put just before 'dur'; * all remaining keys are sorted alphabetically. """ |
sorted_keys = []
if ('epoch' in keys) and ('epoch' not in self.keys_ignored_):
sorted_keys.append('epoch')
for key in sorted(keys):
if not (
(key in ('epoch', 'dur')) or
(key in self.keys_ignored_) or
key.endswith('_best') or
key.startswith('event_')
):
sorted_keys.append(key)
for key in sorted(keys):
if key.startswith('event_') and (key not in self.keys_ignored_):
sorted_keys.append(key)
if ('dur' in keys) and ('dur' not in self.keys_ignored_):
sorted_keys.append('dur')
return sorted_keys |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_tensor(X, device, accept_sparse=False):
    """Turn input data into a torch tensor.

    Handles PackedSequence, numpy arrays, torch tensors, scipy sparse
    CSR matrices, and lists/tuples/dicts thereof (containers are
    converted element-wise).

    Parameters
    ----------
    X : input data
    device : str, torch.device
        The compute device; 'cuda' pushes data to cuda tensors.
    accept_sparse : bool (default=False)
        If True, scipy sparse matrices are converted to torch COO
        tensors; if False, passing one raises a TypeError.

    Returns
    -------
    output : torch Tensor
    """
    convert = partial(to_tensor, device=device)
    if is_torch_data_type(X):
        return X.to(device)
    if isinstance(X, dict):
        return {key: convert(val) for key, val in X.items()}
    if isinstance(X, (list, tuple)):
        return [convert(item) for item in X]
    if np.isscalar(X):
        return torch.as_tensor(X, device=device)
    if isinstance(X, Sequence):
        # go through numpy so mixed python sequences convert consistently
        return torch.as_tensor(np.array(X), device=device)
    if isinstance(X, np.ndarray):
        return torch.as_tensor(X, device=device)
    if sparse.issparse(X):
        if not accept_sparse:
            raise TypeError("Sparse matrices are not supported. Set "
                            "accept_sparse=True to allow sparse matrices.")
        return torch.sparse_coo_tensor(
            X.nonzero(), X.data, size=X.shape).to(device)
    raise TypeError("Cannot convert this data type to a torch tensor.")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_numpy(X):
    """Generic function to convert a pytorch tensor (or pandas NDFrame)
    to a numpy array; numpy arrays pass through unchanged."""
    if isinstance(X, np.ndarray):
        return X
    if is_pandas_ndframe(X):
        return X.values
    if not is_torch_data_type(X):
        raise TypeError("Cannot convert this data type to a numpy array.")
    tensor = X
    if tensor.is_cuda:
        tensor = tensor.cpu()
    if tensor.requires_grad:
        # detach from the autograd graph before converting
        tensor = tensor.detach()
    return tensor.numpy()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _normalize_numpy_indices(i):
"""Normalize the index in case it is a numpy integer or boolean array.""" |
if isinstance(i, np.ndarray):
if i.dtype == bool:
i = tuple(j.tolist() for j in i.nonzero())
elif i.dtype == int:
i = i.tolist()
return i |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def multi_indexing(data, i, indexing=None):
    """Perform indexing on multiple data structures.

    Supported: numpy arrays, torch tensors, pandas NDFrames, and dicts
    or lists/tuples thereof. ``i`` may be an int, a slice, or a numpy
    integer/boolean array.

    Parameters
    ----------
    data
      Data of a supported type.

    i : int or slice
      Slicing index.

    indexing : function/callable or None (default=None)
      If not None, use this function for indexing into the data. If
      None, try to automatically determine how to index data.
    """
    # numpy bool/int index arrays are first turned into plain Python indices.
    i = _normalize_numpy_indices(i)

    # Use the caller-supplied indexing function if given, otherwise
    # determine one from the data and apply it.
    fn = indexing if indexing is not None else check_indexing(data)
    return fn(data, i)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def duplicate_items(*collections):
    """Search for duplicate items across all given collections.

    Returns the set of items that occur more than once in the
    flattened concatenation of all collections.
    """
    seen, dupes = set(), set()
    for item in flatten(collections):
        # A second (or later) occurrence marks the item as duplicate.
        (dupes if item in seen else seen).add(item)
    return dupes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def params_for(prefix, kwargs):
    """Extract parameters belonging to a given sklearn module prefix
    from ``kwargs``.

    The prefix (with ``'__'`` appended if missing) is stripped from the
    matching keys, which is useful to obtain the parameters destined
    for a submodule.
    """
    full_prefix = prefix if prefix.endswith('__') else prefix + '__'
    start = len(full_prefix)
    return {key[start:]: val
            for key, val in kwargs.items()
            if key.startswith(full_prefix)}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data_from_dataset(dataset, X_indexing=None, y_indexing=None):
    """Try to access the X and y attributes of a dataset.

    Also works when ``dataset`` is a ``torch.utils.data.Subset``
    wrapping a ``skorch.dataset.Dataset``; in that case the subset
    indices are applied to X and y.

    Parameters
    ----------
    dataset : skorch.dataset.Dataset or torch.utils.data.Subset
      The incoming dataset should be a ``skorch.dataset.Dataset`` or a
      ``torch.utils.data.Subset`` of a ``skorch.dataset.Dataset``.

    X_indexing : function/callable or None (default=None)
      If not None, use this function for indexing into the X data. If
      None, try to automatically determine how to index data.

    y_indexing : function/callable or None (default=None)
      If not None, use this function for indexing into the y data. If
      None, try to automatically determine how to index data.
    """
    X, y = _none, _none

    if isinstance(dataset, Subset):
        # Recurse into the wrapped dataset, then restrict to the
        # subset's indices.
        X, y = data_from_dataset(
            dataset.dataset, X_indexing=X_indexing, y_indexing=y_indexing)
        X = multi_indexing(X, dataset.indices, indexing=X_indexing)
        y = multi_indexing(y, dataset.indices, indexing=y_indexing)
    elif hasattr(dataset, 'X') and hasattr(dataset, 'y'):
        X, y = dataset.X, dataset.y

    # ``_none`` sentinel means we never found usable X/y attributes.
    if (X is _none) or (y is _none):
        raise AttributeError("Could not access X and y from dataset.")
    return X, y
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_skorch_dataset(ds):
    """Check whether ``ds`` is a ``skorch.dataset.Dataset``, unwrapping
    any (possibly nested) ``torch.utils.data.Subset`` first."""
    from skorch.dataset import Dataset

    # Walk down through nested Subset wrappers.
    while isinstance(ds, Subset):
        ds = ds.dataset
    return isinstance(ds, Dataset)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open_file_like(f, mode):
    """Contextmanager helper for opening files.

    If ``f`` is a str or ``pathlib.Path``, it is opened with ``mode``
    and closed on exit; file-like objects passed in directly are
    yielded as-is and left open.
    """
    opened_here = isinstance(f, (str, pathlib.Path))
    fileobj = open(f, mode) if opened_here else f
    try:
        yield fileobj
    finally:
        # Only close handles we opened ourselves.
        if opened_here:
            fileobj.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cache_net_infer(net, use_caching, y_preds):
    """Caching context for a ``skorch.NeuralNet`` instance.

    While the context is active, ``net.infer`` returns the cached
    predictions from ``y_preds`` one batch at a time; on exit the
    original ``infer`` method is restored.
    """
    if not use_caching:
        # Nothing to patch; hand back the unmodified net.
        yield net
        return

    cached = iter(y_preds)
    net.infer = lambda *args, **kwargs: next(cached)
    try:
        yield net
    finally:
        # Assigning ``net.infer`` above created an instance attribute
        # shadowing the bound method; deleting it from the instance
        # dict restores the original method.
        del net.__dict__['infer']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_sklearn_metric_function(scoring):
    """If ``scoring`` is a sklearn metric function, convert it to a
    sklearn scorer and return it; otherwise return ``scoring``
    unchanged."""
    if not callable(scoring):
        return scoring

    module = getattr(scoring, '__module__', None)
    # Only plain metric functions live directly under sklearn.metrics;
    # already-wrapped scorers and test helpers must not be wrapped again.
    if (hasattr(module, 'startswith')
            and module.startswith('sklearn.metrics.')
            and not module.startswith('sklearn.metrics.scorer')
            and not module.startswith('sklearn.metrics.tests.')):
        return make_scorer(scoring)
    return scoring
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_name(self):
"""Find name of scoring function.""" |
if self.name is not None:
return self.name
if self.scoring_ is None:
return 'score'
if isinstance(self.scoring_, str):
return self.scoring_
if isinstance(self.scoring_, partial):
return self.scoring_.func.__name__
if isinstance(self.scoring_, _BaseScorer):
return self.scoring_._score_func.__name__
return self.scoring_.__name__ |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _scoring(self, net, X_test, y_test):
    """Resolve the scoring function and apply it to the given data.

    If predictions were cached on the net, they are used instead of
    running inference again.
    """
    # Let sklearn resolve strings/callables/None into a scorer object.
    scorer = check_scoring(net, self.scoring_)
    return _score(
        estimator=net,
        X_test=X_test,
        y_test=y_test,
        scorer=scorer,
        is_multimetric=False,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _record_score(self, history, current_score):
"""Record the current store and, if applicable, if it's the best score yet. """ |
history.record(self.name_, current_score)
is_best = self._is_best_score(current_score)
if is_best is None:
return
history.record(self.name_ + '_best', bool(is_best))
if is_best:
self.best_score_ = current_score |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calcuate_bboxes(im_shape, patch_size):
    """Yield the top-left corners of patches of size ``patch_size``
    tiling an image of shape ``im_shape``.

    The last patch along each axis is anchored flush with the image
    border, so patches near the border may overlap their neighbors.
    """
    im_h, im_w = im_shape
    patch_h, patch_w = patch_size
    # Regular grid steps plus one final step flush with the border.
    ys = chain(range(0, im_h - patch_h, patch_h), [im_h - patch_h])
    xs = chain(range(0, im_w - patch_w, patch_w), [im_w - patch_w])
    return product(ys, xs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_classifier(output_nonlin=nn.Softmax(dim=-1), **kwargs):
    """Return a factory for a multi-layer perceptron suitable for use
    with NeuralNetClassifier (softmax output by default).

    Keyword arguments such as ``input_units``, ``output_units``,
    ``hidden_units``, ``num_hidden``, ``nonlin`` and ``dropout`` are
    forwarded to ``MLPModule``.
    """
    module_kwargs = dict(kwargs, output_nonlin=output_nonlin)
    return partial(MLPModule, **module_kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notify(self, method_name, **cb_kwargs):
    """Invoke the callback hook ``method_name`` with ``cb_kwargs``.

    Valid hook names include on_train_begin, on_train_end,
    on_epoch_begin, on_epoch_end, on_batch_begin, on_batch_end.
    The net's own hook runs first, then every registered callback in
    registration order.
    """
    getattr(self, method_name)(self, **cb_kwargs)
    for _, callback in self.callbacks_:
        getattr(callback, method_name)(self, **cb_kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _yield_callbacks(self):
    """Yield (name, callback, named_by_user) triples for all callbacks.

    Handles default and user callbacks, named and unnamed ones, and
    initialized as well as uninitialized ones; any PrintLog callbacks
    are always yielded last.
    """
    print_logs = []

    for item in self.get_default_callbacks() + (self.callbacks or []):
        if isinstance(item, (tuple, list)):
            # The user supplied an explicit (name, callback) pair.
            name, cb = item
            named_by_user = True
        else:
            cb = item
            named_by_user = False
            # Derive the name from the (possibly uninitialized) class.
            if isinstance(cb, type):
                name = cb.__name__
            else:
                name = cb.__class__.__name__

        entry = (name, cb, named_by_user)
        if isinstance(cb, PrintLog) or (cb == PrintLog):
            print_logs.append(entry)
        else:
            yield entry

    yield from print_logs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _callbacks_grouped_by_name(self):
"""Group callbacks by name and collect names set by the user.""" |
callbacks, names_set_by_user = OrderedDict(), set()
for name, cb, named_by_user in self._yield_callbacks():
if named_by_user:
names_set_by_user.add(name)
callbacks[name] = callbacks.get(name, []) + [cb]
return callbacks, names_set_by_user |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _uniquely_named_callbacks(self):
"""Make sure that the returned dict of named callbacks is unique w.r.t. to the callback name. User-defined names will not be renamed on conflict, instead an exception will be raised. The same goes for the event where renaming leads to a conflict. """ |
grouped_cbs, names_set_by_user = self._callbacks_grouped_by_name()
for name, cbs in grouped_cbs.items():
if len(cbs) > 1 and name in names_set_by_user:
raise ValueError("Found duplicate user-set callback name "
"'{}'. Use unique names to correct this."
.format(name))
for i, cb in enumerate(cbs):
if len(cbs) > 1:
unique_name = '{}_{}'.format(name, i+1)
if unique_name in grouped_cbs:
raise ValueError("Assigning new callback name failed "
"since new name '{}' exists already."
.format(unique_name))
else:
unique_name = name
yield unique_name, cb |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize_callbacks(self):
    """Initializes all callbacks and save the result in the
    ``callbacks_`` attribute.

    Both ``default_callbacks`` and ``callbacks`` are used (in that
    order). Callbacks may either be initialized or not, and if they
    don't have a name, the name is inferred from the class name. The
    ``initialize`` method is called on all callbacks.

    The final result will be a list of tuples, where each tuple
    consists of a name and an initialized callback. If names are not
    unique, a ValueError is raised.
    """
    callbacks_ = []

    class Dummy:
        # We cannot use None as dummy value since None is a
        # legitimate value to be set.
        pass

    for name, cb in self._uniquely_named_callbacks():
        # check if callback itself is changed
        # (a ``callbacks__<name>`` attribute replaces the callback wholesale)
        param_callback = getattr(self, 'callbacks__' + name, Dummy)
        if param_callback is not Dummy:  # callback itself was set
            cb = param_callback

        # below: check for callback params
        # don't set a parameter for non-existing callback
        params = self._get_params_for('callbacks__{}'.format(name))
        if (cb is None) and params:
            raise ValueError("Trying to set a parameter for callback {} "
                             "which does not exist.".format(name))
        if cb is None:
            # a None callback means it was explicitly disabled
            continue

        if isinstance(cb, type):  # uninitialized:
            cb = cb(**params)
        else:
            cb.set_params(**params)
        cb.initialize()
        callbacks_.append((name, cb))

    self.callbacks_ = callbacks_
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize_criterion(self):
    """Initialize the criterion with its ``criterion__*`` parameters
    and, if it is an ``nn.Module``, move it to ``self.device``."""
    params = self._get_params_for('criterion')
    criterion = self.criterion(**params)
    if isinstance(criterion, torch.nn.Module):
        criterion = criterion.to(self.device)
    self.criterion_ = criterion
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_reinit_msg(self, name, kwargs=None, triggered_directly=True):
"""Returns a message that informs about re-initializing a compoment. Sometimes, the module or optimizer need to be re-initialized. Not only should the user receive a message about this but also should they be informed about what parameters, if any, caused it. """ |
msg = "Re-initializing {}".format(name)
if triggered_directly and kwargs:
msg += (" because the following parameters were re-set: {}."
.format(', '.join(sorted(kwargs))))
else:
msg += "."
return msg |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialize_module(self):
    """Initialize the module, re-instantiating it if necessary.

    Note that if the module has learned parameters, those will be
    reset when the module is re-instantiated.
    """
    kwargs = self._get_params_for('module')
    module = self.module
    is_initialized = isinstance(module, torch.nn.Module)

    if kwargs or not is_initialized:
        # An already-instantiated module must be re-created from its
        # class so that new ``module__*`` parameters take effect.
        if is_initialized:
            module = type(module)

        if (is_initialized or self.initialized_) and self.verbose:
            print(self._format_reinit_msg("module", kwargs))

        module = module(**kwargs)

    self.module_ = module.to(self.device)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validation_step(self, Xi, yi, **fit_params):
    """Perform a forward step on a batch of validation data and return
    the resulting loss and predictions.

    The module is put into evaluation mode (e.g. dropout is disabled)
    and no gradients are computed.

    Parameters
    ----------
    Xi : input data
      A batch of the input data.

    yi : target data
      A batch of the target data.

    **fit_params : dict
      Additional parameters passed to the ``forward`` method of the
      module.
    """
    self.module_.eval()
    with torch.no_grad():
        prediction = self.infer(Xi, **fit_params)
        loss = self.get_loss(prediction, yi, X=Xi, training=False)
    return {'loss': loss, 'y_pred': prediction}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def train_step_single(self, Xi, yi, **fit_params):
    """Compute y_pred and the loss for one batch and backpropagate the
    gradients.

    The module is put into train mode (e.g. dropout is applied).

    Parameters
    ----------
    Xi : input data
      A batch of the input data.

    yi : target data
      A batch of the target data.

    **fit_params : dict
      Additional parameters passed to the ``forward`` method of the
      module.
    """
    self.module_.train()
    self.optimizer_.zero_grad()

    y_pred = self.infer(Xi, **fit_params)
    loss = self.get_loss(y_pred, yi, X=Xi, training=True)
    loss.backward()

    # Let callbacks (e.g. gradient clipping) act on the freshly
    # computed gradients. NOTE(review): TeeGenerator presumably allows
    # the parameter generator to be consumed by several callbacks --
    # confirm in skorch.utils.
    self.notify(
        'on_grad_computed',
        named_parameters=TeeGenerator(self.module_.named_parameters()),
        X=Xi,
        y=yi
    )
    return {'loss': loss, 'y_pred': y_pred}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def train_step(self, Xi, yi, **fit_params):
    """Perform one optimization step.

    A closure running the forward/backward pass is handed to
    ``optimizer_.step``, as required by some optimizers (and accepted
    by all of them):
    https://pytorch.org/docs/master/optim.html#optimizer-step-closure

    The module is put into train mode.
    """
    accumulator = self.get_train_step_accumulator()

    def closure():
        # The optimizer may evaluate this closure several times (e.g.
        # LBFGS); the accumulator decides which step result to keep.
        result = self.train_step_single(Xi, yi, **fit_params)
        accumulator.store_step(result)
        return result['loss']

    self.optimizer_.step(closure)
    return accumulator.get_step()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def evaluation_step(self, Xi, training=False):
    """Run a forward pass producing the output used for prediction and
    scoring.

    By default the module is put into evaluation mode; pass
    ``training=True`` to keep features like dropout active. Gradient
    tracking follows the same flag.
    """
    grad_mode = torch.set_grad_enabled(training)
    with grad_mode:
        self.module_.train(mode=training)
        return self.infer(Xi)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_loop(self, X, y=None, epochs=None, **fit_params):
    """The proper fit loop.

    Contains the logic of what actually happens during the fit loop:
    splitting the data, iterating over train (and optionally
    validation) batches for each epoch, recording batch losses/sizes
    in the history, and firing the epoch/batch callbacks.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, you should be able to pass numpy arrays, torch
      tensors, pandas DataFrame or Series, scipy sparse CSR matrices,
      a dictionary/list/tuple of the former, or a Dataset.

    y : target data, compatible with skorch.dataset.Dataset
      The same data types as for ``X`` are supported. If your X is a
      Dataset that contains the target, ``y`` may be set to None.

    epochs : int or None (default=None)
      If int, train for this number of epochs; if None, use
      ``self.max_epochs``.

    **fit_params : dict
      Additional parameters passed to the ``forward`` method of the
      module and to the ``self.train_split`` call.
    """
    self.check_data(X, y)
    epochs = epochs if epochs is not None else self.max_epochs

    dataset_train, dataset_valid = self.get_split_datasets(
        X, y, **fit_params)
    on_epoch_kwargs = {
        'dataset_train': dataset_train,
        'dataset_valid': dataset_valid,
    }

    # When y is only a placeholder (dataset carries no real target),
    # batch callbacks receive y=None instead of the placeholder.
    y_train_is_ph = uses_placeholder_y(dataset_train)
    y_valid_is_ph = uses_placeholder_y(dataset_valid)

    for _ in range(epochs):
        self.notify('on_epoch_begin', **on_epoch_kwargs)

        # --- training batches ---
        for data in self.get_iterator(dataset_train, training=True):
            Xi, yi = unpack_data(data)
            yi_res = yi if not y_train_is_ph else None
            self.notify('on_batch_begin', X=Xi, y=yi_res, training=True)
            step = self.train_step(Xi, yi, **fit_params)
            self.history.record_batch('train_loss', step['loss'].item())
            self.history.record_batch('train_batch_size', get_len(Xi))
            self.notify('on_batch_end', X=Xi, y=yi_res, training=True, **step)

        if dataset_valid is None:
            # No validation split: close the epoch and move on.
            self.notify('on_epoch_end', **on_epoch_kwargs)
            continue

        # --- validation batches ---
        for data in self.get_iterator(dataset_valid, training=False):
            Xi, yi = unpack_data(data)
            yi_res = yi if not y_valid_is_ph else None
            self.notify('on_batch_begin', X=Xi, y=yi_res, training=False)
            step = self.validation_step(Xi, yi, **fit_params)
            self.history.record_batch('valid_loss', step['loss'].item())
            self.history.record_batch('valid_batch_size', get_len(Xi))
            self.notify('on_batch_end', X=Xi, y=yi_res, training=False, **step)

        self.notify('on_epoch_end', **on_epoch_kwargs)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partial_fit(self, X, y=None, classes=None, **fit_params):
    """Fit the module without re-initializing it.

    Use this method if you want to continue training a model (warm
    start): if the net is already initialized it is left untouched.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, numpy arrays, torch tensors, pandas DataFrame or
      Series, scipy sparse CSR matrices, a dictionary/list/tuple of
      the former, or a Dataset.

    y : target data, compatible with skorch.dataset.Dataset
      The same data types as for ``X`` are supported. If your X is a
      Dataset that contains the target, ``y`` may be set to None.

    classes : array, shape (n_classes,)
      Solely for sklearn compatibility, currently unused.

    **fit_params : dict
      Additional parameters passed to the ``forward`` method of the
      module and to the ``self.train_split`` call.
    """
    if not self.initialized_:
        self.initialize()

    self.notify('on_train_begin', X=X, y=y)
    try:
        self.fit_loop(X, y, **fit_params)
    except KeyboardInterrupt:
        # Allow the user to interrupt training manually without
        # losing the progress made so far.
        pass
    self.notify('on_train_end', X=X, y=y)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit(self, X, y=None, **fit_params):
    """Initialize and fit the module.

    Unless ``warm_start`` is True and the net is already initialized,
    calling fit re-initializes the module (resetting any learned
    parameters) before delegating to ``partial_fit``.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, numpy arrays, torch tensors, pandas DataFrame or
      Series, scipy sparse CSR matrices, a dictionary/list/tuple of
      the former, or a Dataset.

    y : target data, compatible with skorch.dataset.Dataset
      The same data types as for ``X`` are supported. If your X is a
      Dataset that contains the target, ``y`` may be set to None.

    **fit_params : dict
      Additional parameters passed to the ``forward`` method of the
      module and to the ``self.train_split`` call.
    """
    keep_state = self.warm_start and self.initialized_
    if not keep_state:
        self.initialize()

    self.partial_fit(X, y, **fit_params)
    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forward_iter(self, X, training=False, device='cpu'):
    """Yield the module's output for each batch of ``X``.

    Each yielded tensor (or tuple of tensors for multi-output modules)
    is moved to ``device``; the default is CPU since there is
    generally more memory available there.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, numpy arrays, torch tensors, pandas DataFrame or
      Series, scipy sparse CSR matrices, a dictionary/list/tuple of
      the former, or a Dataset.

    training : bool (default=False)
      Whether to set the module to train mode or not.

    device : string (default='cpu')
      The device to store each inference result on.

    Yields
    ------
    yp : torch tensor
      Result from a forward call on an individual batch.
    """
    dataset = self.get_dataset(X)
    for data in self.get_iterator(dataset, training=training):
        Xi = unpack_data(data)[0]
        yp = self.evaluation_step(Xi, training=training)
        # Multi-output modules return a tuple of tensors per batch.
        if isinstance(yp, tuple):
            yield tuple(tensor.to(device) for tensor in yp)
        else:
            yield yp.to(device)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forward(self, X, training=False, device='cpu'):
    """Run a forward pass over all batches of ``X`` and concatenate
    the outputs with :func:`torch.cat`.

    If the module returns multiple outputs, each of them must support
    concatenation and a tuple of concatenated tensors is returned.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, numpy arrays, torch tensors, pandas DataFrame or
      Series, scipy sparse CSR matrices, a dictionary/list/tuple of
      the former, or a Dataset.

    training : bool (default=False)
      Whether to set the module to train mode or not.

    device : string (default='cpu')
      The device to store each inference result on.

    Returns
    -------
    y_infer : torch tensor
      The result from the forward step.
    """
    outputs = list(self.forward_iter(X, training=training, device=device))

    if outputs and isinstance(outputs[0], tuple):
        # One concatenated tensor per module output.
        return tuple(torch.cat(tensors) for tensors in zip(*outputs))
    return torch.cat(outputs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def infer(self, x, **fit_params):
    """Perform a single inference step on a batch of data.

    Dict inputs are unpacked as keyword arguments to the module after
    being merged with ``fit_params``.

    Parameters
    ----------
    x : input data
      A batch of the input data.

    **fit_params : dict
      Additional parameters passed to the ``forward`` method of the
      module.
    """
    x = to_tensor(x, device=self.device)
    if not isinstance(x, dict):
        return self.module_(x, **fit_params)
    x_dict = self._merge_x_and_fit_params(x, fit_params)
    return self.module_(**x_dict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def predict_proba(self, X):
    """Return the output of the module's forward method as a numpy
    array.

    If the module returns multiple outputs as a tuple, only the first
    output is used; consider :func:`~skorch.NeuralNet.forward` when
    all outputs are relevant.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, numpy arrays, torch tensors, pandas DataFrame or
      Series, scipy sparse CSR matrices, a dictionary/list/tuple of
      the former, or a Dataset.

    Returns
    -------
    y_proba : numpy ndarray
    """
    batches = []
    for yp in self.forward_iter(X, training=False):
        # For multi-output modules, keep only the first output.
        first = yp[0] if isinstance(yp, tuple) else yp
        batches.append(to_numpy(first))
    return np.concatenate(batches, 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_loss(self, y_pred, y_true, X=None, training=False):
    """Return the criterion's loss for this batch.

    Parameters
    ----------
    y_pred : torch tensor
      Predicted target values.

    y_true : torch tensor
      True target values; moved to the net's device before the
      criterion is applied.

    X : input data (default=None)
      The batch's input data; unused here but available to overrides.

    training : bool (default=False)
      Whether train mode should be used or not; unused here but
      available to overrides.
    """
    target = to_tensor(y_true, device=self.device)
    return self.criterion_(y_pred, target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dataset(self, X, y=None):
    """Return a dataset wrapping the input data for the iterator.

    If ``X`` already is a dataset, it is returned as-is. Override this
    if you want to initialize your dataset differently.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, numpy arrays, torch tensors, pandas DataFrame or
      Series, scipy sparse CSR matrices, a dictionary/list/tuple of
      the former, or a Dataset.

    y : target data, compatible with skorch.dataset.Dataset
      The same data types as for ``X`` are supported. If your X is a
      Dataset that contains the target, ``y`` may be set to None.

    Returns
    -------
    dataset
      The initialized dataset.
    """
    if is_dataset(X):
        return X

    dataset = self.dataset
    # A non-callable self.dataset is assumed to be an already
    # initialized dataset instance.
    already_initialized = not callable(dataset)
    kwargs = self._get_params_for('dataset')

    if kwargs and already_initialized:
        raise TypeError("Trying to pass an initialized Dataset while "
                        "passing Dataset arguments ({}) is not "
                        "allowed.".format(kwargs))
    if already_initialized:
        return dataset
    return dataset(X, y, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_split_datasets(self, X, y=None, **fit_params):
    """Return the internal (train, valid) dataset pair.

    The validation dataset is None when ``self.train_split`` is not
    set, in which case internal validation is skipped. Override this
    to change how incoming data is split.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, numpy arrays, torch tensors, pandas DataFrame or
      Series, scipy sparse CSR matrices, a dictionary/list/tuple of
      the former, or a Dataset.

    y : target data, compatible with skorch.dataset.Dataset
      The same data types as for ``X`` are supported. If your X is a
      Dataset that contains the target, ``y`` may be set to None.

    **fit_params : dict
      Additional parameters passed to the ``self.train_split`` call.

    Returns
    -------
    dataset_train
      The initialized training dataset.

    dataset_valid
      The initialized validation dataset or None.
    """
    dataset = self.get_dataset(X, y)
    if not self.train_split:
        return dataset, None
    # Unpacking enforces that train_split returns exactly two datasets.
    dataset_train, dataset_valid = self.train_split(dataset, y, **fit_params)
    return dataset_train, dataset_valid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_iterator(self, dataset, training=False):
    """Return an iterator over the mini-batches of ``dataset``.

    ``iterator_train`` is used during training and ``iterator_valid``
    otherwise. If no ``batch_size`` is configured for the chosen
    iterator, ``self.batch_size`` is used; a batch size of -1 means
    "use the whole dataset as one batch".

    Parameters
    ----------
    dataset : torch Dataset (default=skorch.dataset.Dataset)
      Usually ``self.dataset``, initialized with the corresponding
      data.

    training : bool (default=False)
      Whether to use ``iterator_train`` or ``iterator_valid``.

    Returns
    -------
    iterator
      An instantiated iterator over the mini-batches.
    """
    if training:
        iterator = self.iterator_train
        kwargs = self._get_params_for('iterator_train')
    else:
        iterator = self.iterator_valid
        kwargs = self._get_params_for('iterator_valid')

    kwargs.setdefault('batch_size', self.batch_size)
    if kwargs['batch_size'] == -1:
        kwargs['batch_size'] = len(dataset)
    return iterator(dataset, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_params(self, **kwargs):
    """Set the parameters of this class.

    Valid parameter keys can be listed with ``get_params()``.

    Parameters are sorted into four buckets (virtual, callback,
    special/prefixed, normal) and applied in that order. Setting a
    special parameter may trigger re-initialization of the criterion,
    module, and/or optimizer so that the change actually takes effect.

    Returns
    -------
    self

    """
    self._check_deprecated_params(**kwargs)
    # Sort incoming parameters into four disjoint buckets; each bucket
    # is applied through a different mechanism below.
    normal_params, cb_params, special_params = {}, {}, {}
    virtual_params = {}
    for key, val in kwargs.items():
        if self._is_virtual_param(key):
            virtual_params[key] = val
        elif key.startswith('callbacks'):
            cb_params[key] = val
        elif any(key.startswith(prefix) for prefix in self.prefixes_):
            special_params[key] = val
        else:
            normal_params[key] = val
    self._apply_virtual_params(virtual_params)
    # Plain attributes are handled by sklearn's own set_params.
    BaseEstimator.set_params(self, **normal_params)

    for key, val in special_params.items():
        if key.endswith('_'):
            # Trailing-underscore names are reserved for initialized
            # components; a prefixed key ending in '_' should never
            # reach this point.
            raise ValueError(
                "Something went wrong here. Please open an issue on "
                "https://github.com/dnouri/skorch/issues detailing what "
                "caused this error.")
        else:
            setattr(self, key, val)

    # Below: Re-initialize parts of the net if necessary.
    if cb_params:
        # callbacks need special treatment since they are list of tuples
        self.initialize_callbacks()
        self._set_params_callback(**cb_params)

    if any(key.startswith('criterion') for key in special_params):
        self.initialize_criterion()

    module_triggers_optimizer_reinit = False
    if any(key.startswith('module') for key in special_params):
        self.initialize_module()
        # A new module means new parameters, so the optimizer must be
        # rebuilt even if no optimizer parameter changed.
        module_triggers_optimizer_reinit = True

    optimizer_changed = (
        any(key.startswith('optimizer') for key in special_params) or
        'lr' in normal_params
    )
    if module_triggers_optimizer_reinit or optimizer_changed:
        # Model selectors such as GridSearchCV will set the
        # parameters before .initialize() is called, therefore we
        # need to make sure that we have an initialized model here
        # as the optimizer depends on it.
        if not hasattr(self, 'module_'):
            self.initialize_module()
        # If we reached this point but the optimizer was not
        # changed, it means that optimizer initialization was
        # triggered indirectly.
        self.initialize_optimizer(triggered_directly=optimizer_changed)

    vars(self).update(kwargs)

    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_params_callback(self, **params):
    """Special handling for setting params on callbacks.

    Keys arrive as ``callbacks``, ``callbacks__<name>``, or
    ``callbacks__<name>__<param>`` and are applied in that order:
    wholesale replacement of the callbacks list, replacement of a
    single named callback, then forwarding a parameter to a callback's
    ``set_params``. Each pass destructively pops the keys it consumed.
    """
    # modeled after sklearn.utils._BaseComposition._set_params
    # 1. All steps: replace the entire callbacks list.
    if 'callbacks' in params:
        setattr(self, 'callbacks', params.pop('callbacks'))

    # 2. Step replacement: swap out a single named callback.
    names, _ = zip(*getattr(self, 'callbacks_'))
    for key in params.copy():
        name = key[11:]  # drop the 11-character 'callbacks__' prefix
        if '__' not in name and name in names:
            self._replace_callback(name, params.pop(key))

    # 3. Step parameters and other initialisation arguments.
    for key in params.copy():
        name = key[11:]
        part0, part1 = name.split('__')
        kwarg = {part1: params.pop(key)}
        callback = dict(self.callbacks_).get(part0)
        if callback is not None:
            callback.set_params(**kwarg)
        else:
            raise ValueError(
                "Trying to set a parameter for callback {} "
                "which does not exist.".format(part0))

    return self
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_params(
        self, f=None, f_params=None, f_optimizer=None, f_history=None):
    """Saves the module's parameters, history, and optimizer, not the
    whole object.

    To save the whole object, use pickle. ``f_params`` and
    ``f_optimizer`` use PyTorch's :func:`~torch.save`. Any target that
    is ``None`` is simply skipped.

    Parameters
    ----------
    f_params : file-like object, str, None (default=None)
      Path of module parameters. Pass ``None`` to not save.

    f_optimizer : file-like object, str, None (default=None)
      Path of optimizer. Pass ``None`` to not save.

    f_history : file-like object, str, None (default=None)
      Path to history. Pass ``None`` to not save.

    f : deprecated
      Alias for ``f_params``.

    Raises
    ------
    NotInitializedError
      If a parameter/optimizer target is given but the corresponding
      component has not been initialized yet.

    """
    # TODO: Remove warning in a future release
    if f is not None:
        warnings.warn(
            "f argument was renamed to f_params and will be removed "
            "in the next release. To make your code future-proof it is "
            "recommended to explicitly specify keyword arguments' names "
            "instead of relying on positional order.",
            DeprecationWarning)
        f_params = f

    # (attribute, target, error message) for each torch-saved component;
    # processed in the same order as before: params first, then optimizer.
    saves = (
        ('module_', f_params,
         "Cannot save parameters of an un-initialized model. "
         "Please initialize first by calling .initialize() "
         "or by fitting the model with .fit(...)."),
        ('optimizer_', f_optimizer,
         "Cannot save state of an un-initialized optimizer. "
         "Please initialize first by calling .initialize() "
         "or by fitting the model with .fit(...)."),
    )
    for attr, target, msg in saves:
        if target is None:
            continue
        if not hasattr(self, attr):
            raise NotInitializedError(msg)
        torch.save(getattr(self, attr).state_dict(), target)

    if f_history is not None:
        self.history.to_file(f_history)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_device(self, requested_device, map_device):
"""Compare the requested device with the map device and return the map device if it differs from the requested device along with a warning. """ |
type_1 = torch.device(requested_device)
type_2 = torch.device(map_device)
if type_1 != type_2:
warnings.warn(
'Setting self.device = {} since the requested device ({}) '
'is not available.'.format(map_device, requested_device),
DeviceWarning)
return map_device
# return requested_device instead of map_device even though we
# checked for *type* equality as we might have 'cuda:0' vs. 'cuda:1'.
return requested_device |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_params(
        self, f=None, f_params=None, f_optimizer=None, f_history=None,
        checkpoint=None):
    """Loads the module's parameters, history, and optimizer, not the
    whole object.

    To save and load the whole object, use pickle. ``f_params`` and
    ``f_optimizer`` use PyTorch's :func:`~torch.load`. Any source that
    is ``None`` is skipped.

    Parameters
    ----------
    f_params : file-like object, str, None (default=None)
      Path of module parameters. Pass ``None`` to not load.

    f_optimizer : file-like object, str, None (default=None)
      Path of optimizer. Pass ``None`` to not load.

    f_history : file-like object, str, None (default=None)
      Path to history. Pass ``None`` to not load.

    checkpoint : :class:`.Checkpoint`, None (default=None)
      Checkpoint to load params from. If a checkpoint and a ``f_*``
      path is passed in, the ``f_*`` will be loaded. Pass ``None`` to
      not load.

    f : deprecated
      Alias for ``f_params``.

    Raises
    ------
    NotInitializedError
      If a parameter/optimizer source is given but the corresponding
      component has not been initialized yet.

    """
    def _get_state_dict(f):
        # Map saved storages onto a device that is actually available;
        # downgrades self.device (with a warning) if the requested
        # device is unavailable.
        map_location = get_map_location(self.device)
        self.device = self._check_device(self.device, map_location)
        return torch.load(f, map_location=map_location)

    # TODO: Remove warning in a future release
    # NOTE(review): message says "save_params" although this is
    # load_params -- looks like a copy-paste; confirm before release.
    if f is not None:
        warnings.warn(
            "f is deprecated in save_params and will be removed in the "
            "next release, please use f_params instead",
            DeprecationWarning)
        f_params = f

    if f_history is not None:
        self.history = History.from_file(f_history)

    if checkpoint is not None:
        # Explicit f_* arguments take precedence over the paths derived
        # from the checkpoint.
        if f_history is None and checkpoint.f_history is not None:
            self.history = History.from_file(checkpoint.f_history_)
        formatted_files = checkpoint.get_formatted_files(self)
        f_params = f_params or formatted_files['f_params']
        f_optimizer = f_optimizer or formatted_files['f_optimizer']

    if f_params is not None:
        if not hasattr(self, 'module_'):
            raise NotInitializedError(
                "Cannot load parameters of an un-initialized model. "
                "Please initialize first by calling .initialize() "
                "or by fitting the model with .fit(...).")
        state_dict = _get_state_dict(f_params)
        self.module_.load_state_dict(state_dict)

    if f_optimizer is not None:
        if not hasattr(self, 'optimizer_'):
            raise NotInitializedError(
                "Cannot load state of an un-initialized optimizer. "
                "Please initialize first by calling .initialize() "
                "or by fitting the model with .fit(...).")
        state_dict = _get_state_dict(f_optimizer)
        self.optimizer_.load_state_dict(state_dict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_history(self, f):
    """Write the net's history to ``f`` as JSON.

    The history must only contain JSON encodable Python data
    structures; numpy and PyTorch types should not be in it.

    .. deprecated::
        Use ``save_params`` with the ``f_history`` keyword instead.

    Parameters
    ----------
    f : file-like object or str
      Target file or path for the JSON output.

    """
    # TODO: Remove warning in a future release
    msg = (
        "save_history is deprecated and will be removed in the next "
        "release, please use save_params with the f_history keyword"
    )
    warnings.warn(msg, DeprecationWarning)
    self.history.to_file(f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_history(self, f):
    """Load a JSON history file into ``self.history``.

    See ``save_history`` for examples.

    .. deprecated::
        Use ``load_params`` with the ``f_history`` keyword instead.

    Parameters
    ----------
    f : file-like object or str
      Source file or path of the JSON history.

    """
    # TODO: Remove warning in a future release
    msg = (
        "load_history is deprecated and will be removed in the next "
        "release, please use load_params with the f_history keyword"
    )
    warnings.warn(msg, DeprecationWarning)
    self.history = History.from_file(f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _not_none(items):
    """Return True if neither ``items`` nor any of its elements is the
    ``_none`` placeholder."""
    items = items if isinstance(items, (tuple, list)) else (items,)
    return not any(item is _none for item in items)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _filter_none(items):
    """Drop placeholder entries from ``items``, preserving its sequence
    type (list in, list out; anything else comes back as a tuple)."""
    if isinstance(items, list):
        return [item for item in items if _not_none(item)]
    return tuple(item for item in items if _not_none(item))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _getitem(item, i):
    """Extract the value(s) for key(s) ``i`` from the mapping ``item``.

    ``i`` may be a single key or a list/tuple of keys. Missing keys
    yield the ``_none`` placeholder instead of raising. When ``i`` is a
    sequence of keys, the result preserves the sequence type of ``i``
    (list of keys in, list of values out).
    """
    if not isinstance(i, (tuple, list)):
        return item.get(i, _none)
    # Bug fix: preserve the sequence type of the *keys* ``i``. The old
    # check ``isinstance(item, list)`` inspected the mapping instead,
    # so a list of keys always produced a tuple (compare _filter_none,
    # which correctly checks its sequence argument).
    type_ = list if isinstance(i, list) else tuple
    return type_(item.get(j, _none) for j in i)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _unpack_index(i):
"""Unpack index and return exactly four elements. If index is more shallow than 4, return None for trailing dimensions. If index is deeper than 4, raise a KeyError. """ |
if len(i) > 4:
raise KeyError(
"Tried to index history with {} indices but only "
"4 indices are possible.".format(len(i)))
# fill trailing indices with None
i_e, k_e, i_b, k_b = i + tuple([None] * (4 - len(i)))
# handle special case of
# history[j, 'batches', somekey]
# which should really be
# history[j, 'batches', :, somekey]
if i_b is not None and not isinstance(i_b, (int, slice)):
if k_b is not None:
raise KeyError("The last argument '{}' is invalid; it must be a "
"string or tuple of strings.".format(k_b))
warnings.warn(
"Argument 3 to history slicing must be of type int or slice, e.g. "
"history[:, 'batches', 'train_loss'] should be "
"history[:, 'batches', :, 'train_loss'].",
DeprecationWarning,
)
i_b, k_b = slice(None), i_b
return i_e, k_e, i_b, k_b |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def record(self, attr, value):
    """Set column ``attr`` of the current (most recent) epoch to
    ``value``; at least one epoch must exist already."""
    if not self:
        raise ValueError("Call new_epoch before recording for the first time.")
    self[-1][attr] = value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_file(cls, f):
    """Build a History instance from a JSON file previously written by
    ``to_file``.

    Parameters
    ----------
    f : file-like object or str
      Source file or path of the JSON history.

    """
    with open_file_like(f, 'r') as fhandle:
        data = json.load(fhandle)
    return cls(data)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_file(self, f):
    """Serialize the history to ``f`` as JSON.

    All recorded values must be JSON encodable Python data structures;
    numpy and PyTorch types should be converted before recording.

    Parameters
    ----------
    f : file-like object or str
      Target file or path for the JSON output.

    """
    data = self.to_list()
    with open_file_like(f, 'w') as fhandle:
        json.dump(data, fhandle)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_formatted_files(self, net):
    """Return a dict mapping target names (``f_params`` etc.) to their
    formatted file names, based on the last epoch where the monitored
    event fired (or -1 if it never did)."""
    idx = -1
    if self.event_name is not None and net.history:
        flags = net.history[:, self.event_name]
        hits = [epoch for epoch, flag in enumerate(flags) if flag]
        if hits:
            idx = hits[-1]
    return {
        "f_params": self._format_target(net, self.f_params, idx),
        "f_optimizer": self._format_target(net, self.f_optimizer, idx),
        "f_history": self.f_history_,
        "f_pickle": self._format_target(net, self.f_pickle, idx),
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _format_target(self, net, f, idx):
"""Apply formatting to the target filename template.""" |
if f is None:
return None
if isinstance(f, str):
f = self.fn_prefix + f.format(
net=net,
last_epoch=net.history[idx],
last_batch=net.history[idx, 'batches', -1],
)
return os.path.join(self.dirname, f)
return f |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_filenames(self):
"""Checks if passed filenames are valid. Specifically, f_* parameter should not be passed in conjunction with dirname. """ |
if not self.dirname:
return
def _is_truthy_and_not_str(f):
return f and not isinstance(f, str)
if (
_is_truthy_and_not_str(self.f_optimizer) or
_is_truthy_and_not_str(self.f_params) or
_is_truthy_and_not_str(self.f_history) or
_is_truthy_and_not_str(self.f_pickle)
):
raise SkorchException(
'dirname can only be used when f_* are strings') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _calc_new_threshold(self, score):
"""Determine threshold based on score.""" |
if self.threshold_mode == 'rel':
abs_threshold_change = self.threshold * score
else:
abs_threshold_change = self.threshold
if self.lower_is_better:
new_threshold = score - abs_threshold_change
else:
new_threshold = score + abs_threshold_change
return new_threshold |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def repackage_hidden(self, h):
    """Detach hidden state(s) from their computation-graph history by
    copying them into fresh tensors; tuples are handled recursively."""
    if not isinstance(h, Variable):
        return tuple(self.repackage_hidden(part) for part in h)
    return torch.tensor(h.data, device=h.device)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_optimizer_param(optimizer, param_group, param_name, value):
"""Set a parameter on an all or a specific parameter group of an optimizer instance. To select all param groups, use ``param_group='all'``. """ |
if param_group == 'all':
groups = optimizer.param_groups
else:
groups = [optimizer.param_groups[int(param_group)]]
for group in groups:
group[param_name] = value |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def optimizer_setter(
        net, param, value, optimizer_attr='optimizer_',
        optimizer_name='optimizer'):
    """Apply an optimizer parameter (the learning rate or a
    param-group-specific key) to the net's optimizer.

    ``optimizer_attr`` and ``optimizer_name`` can be overridden when
    there is more than one optimizer (e.g. in seq2seq models).
    """
    if param == 'lr':
        # The learning rate is mirrored onto the net itself and applied
        # to every param group.
        param_group, param_name = 'all', 'lr'
        net.lr = value
    else:
        param_group, param_name = _extract_optimizer_param_name_and_group(
            optimizer_name, param)
    _set_optimizer_param(
        optimizer=getattr(net, optimizer_attr),
        param_group=param_group,
        param_name=param_name,
        value=value,
    )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_lr(name, optimizer, lr):
"""Return one learning rate for each param group.""" |
n = len(optimizer.param_groups)
if not isinstance(lr, (list, tuple)):
return lr * np.ones(n)
if len(lr) != n:
raise ValueError("{} lr values were passed for {} but there are "
"{} param groups.".format(n, name, len(lr)))
return np.array(lr) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.