Dataset schema (one record per function; observed value ranges from the source viewer):

repo              string  7 to 55 chars
path              string  4 to 223 chars
func_name         string  1 to 134 chars
original_string   string  75 to 104k chars
language          string  1 distinct value ("python")
code              string  75 to 104k chars (identical to original_string)
code_tokens       list    19 to 28.4k items
docstring         string  1 to 46.9k chars
docstring_tokens  list    1 to 1.97k items
sha               string  40 chars
url               string  87 to 315 chars
partition         string  1 distinct value ("train")
fmfn/BayesianOptimization
bayes_opt/bayesian_optimization.py
BayesianOptimization.register
python
def register(self, params, target):
    """Expect observation with known target"""
    self._space.register(params, target)
    # OPTMIZATION_STEP is the event constant's actual (misspelled) name
    # in this version of the library.
    self.dispatch(Events.OPTMIZATION_STEP)
[ "def", "register", "(", "self", ",", "params", ",", "target", ")", ":", "self", ".", "_space", ".", "register", "(", "params", ",", "target", ")", "self", ".", "dispatch", "(", "Events", ".", "OPTMIZATION_STEP", ")" ]
Expect observation with known target
[ "Expect", "observation", "with", "known", "target" ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L102-L105
train
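BayesianOptimization.register is the "tell" half of the API: it hands a pre-evaluated point to the underlying TargetSpace and fires the step event. A minimal usage sketch follows; the quadratic target and bounds are illustrative, not from the dataset, while the constructor signature matches the optimize_svc record further down.

from bayes_opt import BayesianOptimization

def black_box(x, y):
    return -x ** 2 - (y - 1) ** 2 + 1

optimizer = BayesianOptimization(
    f=black_box,
    pbounds={'x': (-2, 2), 'y': (-3, 3)},
    random_state=1,
)
# Feed in a point whose target is already known, so the surrogate model
# can use it without re-evaluating the (possibly expensive) function.
optimizer.register(params={'x': 0.5, 'y': 0.7}, target=black_box(0.5, 0.7))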
fmfn/BayesianOptimization
bayes_opt/bayesian_optimization.py
BayesianOptimization.probe
python
def probe(self, params, lazy=True):
    """Probe target of x"""
    if lazy:
        self._queue.add(params)
    else:
        self._space.probe(params)
        self.dispatch(Events.OPTMIZATION_STEP)
[ "def", "probe", "(", "self", ",", "params", ",", "lazy", "=", "True", ")", ":", "if", "lazy", ":", "self", ".", "_queue", ".", "add", "(", "params", ")", "else", ":", "self", ".", "_space", ".", "probe", "(", "params", ")", "self", ".", "dispatch", "(", "Events", ".", "OPTMIZATION_STEP", ")" ]
Probe target of x
[ "Probe", "target", "of", "x" ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L107-L113
train
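The lazy flag is the whole story here: lazy=True only queues the point for the next maximize call, while lazy=False evaluates the target function immediately and dispatches the step event. Continuing the hypothetical optimizer from the previous sketch:

optimizer.probe(params={'x': 1.0, 'y': 1.0}, lazy=True)   # queued for later
optimizer.probe(params={'x': 0.0, 'y': 1.0}, lazy=False)  # evaluated now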
fmfn/BayesianOptimization
bayes_opt/bayesian_optimization.py
BayesianOptimization.suggest
python
def suggest(self, utility_function):
    """Most promising point to probe next"""
    if len(self._space) == 0:
        return self._space.array_to_params(self._space.random_sample())

    # Sklearn's GP throws a large number of warnings at times, but
    # we don't really need to see them here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        self._gp.fit(self._space.params, self._space.target)

    # Finding argmax of the acquisition function.
    suggestion = acq_max(
        ac=utility_function.utility,
        gp=self._gp,
        y_max=self._space.target.max(),
        bounds=self._space.bounds,
        random_state=self._random_state
    )

    return self._space.array_to_params(suggestion)
[ "def", "suggest", "(", "self", ",", "utility_function", ")", ":", "if", "len", "(", "self", ".", "_space", ")", "==", "0", ":", "return", "self", ".", "_space", ".", "array_to_params", "(", "self", ".", "_space", ".", "random_sample", "(", ")", ")", "# Sklearn's GP throws a large number of warnings at times, but", "# we don't really need to see them here.", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "self", ".", "_gp", ".", "fit", "(", "self", ".", "_space", ".", "params", ",", "self", ".", "_space", ".", "target", ")", "# Finding argmax of the acquisition function.", "suggestion", "=", "acq_max", "(", "ac", "=", "utility_function", ".", "utility", ",", "gp", "=", "self", ".", "_gp", ",", "y_max", "=", "self", ".", "_space", ".", "target", ".", "max", "(", ")", ",", "bounds", "=", "self", ".", "_space", ".", "bounds", ",", "random_state", "=", "self", ".", "_random_state", ")", "return", "self", ".", "_space", ".", "array_to_params", "(", "suggestion", ")" ]
Most promissing point to probe next
[ "Most", "promissing", "point", "to", "probe", "next" ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L115-L135
train
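suggest is the "ask" half: it refits the GP to every registered observation and returns the acquisition argmax as a params dict. A sketch of a hand-driven ask/tell loop under two assumptions: that UtilityFunction lives in bayes_opt.util next to acq_max, and that constructing the optimizer with f=None is acceptable when the function is evaluated externally.

from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction

optimizer = BayesianOptimization(
    f=None,  # we evaluate the target ourselves below
    pbounds={'x': (-2, 2)},
    random_state=7,
)
utility = UtilityFunction(kind='ucb', kappa=2.576, xi=0.0)

for _ in range(5):
    next_point = optimizer.suggest(utility)               # ask
    target = -next_point['x'] ** 2                        # external evaluation
    optimizer.register(params=next_point, target=target)  # tell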
fmfn/BayesianOptimization
bayes_opt/bayesian_optimization.py
BayesianOptimization._prime_queue
python
def _prime_queue(self, init_points):
    """Make sure there's something in the queue at the very beginning."""
    if self._queue.empty and self._space.empty:
        init_points = max(init_points, 1)

    for _ in range(init_points):
        self._queue.add(self._space.random_sample())
[ "def", "_prime_queue", "(", "self", ",", "init_points", ")", ":", "if", "self", ".", "_queue", ".", "empty", "and", "self", ".", "_space", ".", "empty", ":", "init_points", "=", "max", "(", "init_points", ",", "1", ")", "for", "_", "in", "range", "(", "init_points", ")", ":", "self", ".", "_queue", ".", "add", "(", "self", ".", "_space", ".", "random_sample", "(", ")", ")" ]
Make sure there's something in the queue at the very beginning.
[ "Make", "sure", "there", "s", "something", "in", "the", "queue", "at", "the", "very", "beginning", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L137-L143
train
fmfn/BayesianOptimization
bayes_opt/bayesian_optimization.py
BayesianOptimization.maximize
python
def maximize(self, init_points=5, n_iter=25, acq='ucb', kappa=2.576,
             xi=0.0, **gp_params):
    """Maximize your function"""
    self._prime_subscriptions()
    self.dispatch(Events.OPTMIZATION_START)
    self._prime_queue(init_points)
    self.set_gp_params(**gp_params)

    util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
    iteration = 0
    while not self._queue.empty or iteration < n_iter:
        try:
            x_probe = next(self._queue)
        except StopIteration:
            x_probe = self.suggest(util)
            iteration += 1

        self.probe(x_probe, lazy=False)

    self.dispatch(Events.OPTMIZATION_END)
[ "def", "maximize", "(", "self", ",", "init_points", "=", "5", ",", "n_iter", "=", "25", ",", "acq", "=", "'ucb'", ",", "kappa", "=", "2.576", ",", "xi", "=", "0.0", ",", "*", "*", "gp_params", ")", ":", "self", ".", "_prime_subscriptions", "(", ")", "self", ".", "dispatch", "(", "Events", ".", "OPTMIZATION_START", ")", "self", ".", "_prime_queue", "(", "init_points", ")", "self", ".", "set_gp_params", "(", "*", "*", "gp_params", ")", "util", "=", "UtilityFunction", "(", "kind", "=", "acq", ",", "kappa", "=", "kappa", ",", "xi", "=", "xi", ")", "iteration", "=", "0", "while", "not", "self", ".", "_queue", ".", "empty", "or", "iteration", "<", "n_iter", ":", "try", ":", "x_probe", "=", "next", "(", "self", ".", "_queue", ")", "except", "StopIteration", ":", "x_probe", "=", "self", ".", "suggest", "(", "util", ")", "iteration", "+=", "1", "self", ".", "probe", "(", "x_probe", ",", "lazy", "=", "False", ")", "self", ".", "dispatch", "(", "Events", ".", "OPTMIZATION_END", ")" ]
Mazimize your function
[ "Mazimize", "your", "function" ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L152-L176
train
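maximize ties the pieces above together: prime the queue with init_points random samples, then alternate suggest and eager probe until n_iter suggested points have been evaluated. Typical end-to-end usage, mirroring the optimize_svc record below but with an illustrative one-dimensional target:

from bayes_opt import BayesianOptimization

optimizer = BayesianOptimization(
    f=lambda x: -(x - 0.3) ** 2,
    pbounds={'x': (-2, 2)},
    random_state=42,
)
optimizer.maximize(init_points=3, n_iter=10, acq='ucb', kappa=2.576)
print(optimizer.max)  # best target and params found so far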
fmfn/BayesianOptimization
bayes_opt/target_space.py
TargetSpace.register
python
def register(self, params, target):
    """
    Append a point and its target value to the known data.

    Parameters
    ----------
    x : ndarray
        a single point, with len(x) == self.dim

    y : float
        target function value

    Raises
    ------
    KeyError:
        if the point is not unique

    Notes
    -----
    runs in amortized constant time

    Example
    -------
    >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
    >>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
    >>> len(space)
    0
    >>> x = np.array([0, 0])
    >>> y = 1
    >>> space.register(x, y)
    >>> len(space)
    1
    """
    x = self._as_array(params)
    if x in self:
        raise KeyError('Data point {} is not unique'.format(x))

    # Insert data into unique dictionary
    self._cache[_hashable(x.ravel())] = target

    self._params = np.concatenate([self._params, x.reshape(1, -1)])
    self._target = np.concatenate([self._target, [target]])
[ "def", "register", "(", "self", ",", "params", ",", "target", ")", ":", "x", "=", "self", ".", "_as_array", "(", "params", ")", "if", "x", "in", "self", ":", "raise", "KeyError", "(", "'Data point {} is not unique'", ".", "format", "(", "x", ")", ")", "# Insert data into unique dictionary", "self", ".", "_cache", "[", "_hashable", "(", "x", ".", "ravel", "(", ")", ")", "]", "=", "target", "self", ".", "_params", "=", "np", ".", "concatenate", "(", "[", "self", ".", "_params", ",", "x", ".", "reshape", "(", "1", ",", "-", "1", ")", "]", ")", "self", ".", "_target", "=", "np", ".", "concatenate", "(", "[", "self", ".", "_target", ",", "[", "target", "]", "]", ")" ]
Append a point and its target value to the known data. Parameters ---------- x : ndarray a single point, with len(x) == self.dim y : float target function value Raises ------ KeyError: if the point is not unique Notes ----- runs in ammortized constant time Example ------- >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)} >>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds) >>> len(space) 0 >>> x = np.array([0, 0]) >>> y = 1 >>> space.add_observation(x, y) >>> len(space) 1
[ "Append", "a", "point", "and", "its", "target", "value", "to", "the", "known", "data", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L126-L167
train
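TargetSpace can also be driven directly, as its own doctest does. The KeyError on duplicates is what makes probe's caching safe. A sketch using the same toy function as the docstring:

import numpy as np
from bayes_opt.target_space import TargetSpace

space = TargetSpace(lambda p1, p2: p1 + p2, {'p1': (0, 1), 'p2': (1, 100)})
space.register(np.array([0, 0]), 1)
try:
    space.register(np.array([0, 0]), 1)  # same point again
except KeyError:
    print('duplicate point rejected')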
fmfn/BayesianOptimization
bayes_opt/target_space.py
TargetSpace.probe
python
def probe(self, params):
    """
    Evaluates a single point x to obtain the value y, then records them
    as observations.

    Notes
    -----
    If x has been previously seen returns a cached value of y.

    Parameters
    ----------
    x : ndarray
        a single point, with len(x) == self.dim

    Returns
    -------
    y : float
        target function value.
    """
    x = self._as_array(params)

    try:
        target = self._cache[_hashable(x)]
    except KeyError:
        params = dict(zip(self._keys, x))
        target = self.target_func(**params)
        self.register(x, target)
    return target
[ "def", "probe", "(", "self", ",", "params", ")", ":", "x", "=", "self", ".", "_as_array", "(", "params", ")", "try", ":", "target", "=", "self", ".", "_cache", "[", "_hashable", "(", "x", ")", "]", "except", "KeyError", ":", "params", "=", "dict", "(", "zip", "(", "self", ".", "_keys", ",", "x", ")", ")", "target", "=", "self", ".", "target_func", "(", "*", "*", "params", ")", "self", ".", "register", "(", "x", ",", "target", ")", "return", "target" ]
Evaulates a single point x, to obtain the value y and then records them as observations. Notes ----- If x has been previously seen returns a cached value of y. Parameters ---------- x : ndarray a single point, with len(x) == self.dim Returns ------- y : float target function value.
[ "Evaulates", "a", "single", "point", "x", "to", "obtain", "the", "value", "y", "and", "then", "records", "them", "as", "observations", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L169-L196
train
fmfn/BayesianOptimization
bayes_opt/target_space.py
TargetSpace.random_sample
python
def random_sample(self):
    """
    Creates random points within the bounds of the space.

    Returns
    ----------
    data: ndarray
        [num x dim] array points with dimensions corresponding to `self._keys`

    Example
    -------
    >>> target_func = lambda p1, p2: p1 + p2
    >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
    >>> space = TargetSpace(target_func, pbounds, random_state=0)
    >>> space.random_sample()
    array([ 55.33253689,   0.54488318])
    """
    # TODO: support integer, category, and basic scipy.optimize constraints
    data = np.empty((1, self.dim))
    for col, (lower, upper) in enumerate(self._bounds):
        data.T[col] = self.random_state.uniform(lower, upper, size=1)
    return data.ravel()
[ "def", "random_sample", "(", "self", ")", ":", "# TODO: support integer, category, and basic scipy.optimize constraints", "data", "=", "np", ".", "empty", "(", "(", "1", ",", "self", ".", "dim", ")", ")", "for", "col", ",", "(", "lower", ",", "upper", ")", "in", "enumerate", "(", "self", ".", "_bounds", ")", ":", "data", ".", "T", "[", "col", "]", "=", "self", ".", "random_state", ".", "uniform", "(", "lower", ",", "upper", ",", "size", "=", "1", ")", "return", "data", ".", "ravel", "(", ")" ]
Creates random points within the bounds of the space. Returns ---------- data: ndarray [num x dim] array points with dimensions corresponding to `self._keys` Example ------- >>> target_func = lambda p1, p2: p1 + p2 >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)} >>> space = TargetSpace(target_func, pbounds, random_state=0) >>> space.random_points(1) array([[ 55.33253689, 0.54488318]])
[ "Creates", "random", "points", "within", "the", "bounds", "of", "the", "space", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L198-L219
train
fmfn/BayesianOptimization
bayes_opt/target_space.py
TargetSpace.max
python
def max(self):
    """Get maximum target value found and corresponding parameters."""
    try:
        res = {
            'target': self.target.max(),
            'params': dict(
                zip(self.keys, self.params[self.target.argmax()])
            )
        }
    except ValueError:
        res = {}
    return res
[ "def", "max", "(", "self", ")", ":", "try", ":", "res", "=", "{", "'target'", ":", "self", ".", "target", ".", "max", "(", ")", ",", "'params'", ":", "dict", "(", "zip", "(", "self", ".", "keys", ",", "self", ".", "params", "[", "self", ".", "target", ".", "argmax", "(", ")", "]", ")", ")", "}", "except", "ValueError", ":", "res", "=", "{", "}", "return", "res" ]
Get maximum target value found and corresponding parametes.
[ "Get", "maximum", "target", "value", "found", "and", "corresponding", "parametes", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L221-L232
train
fmfn/BayesianOptimization
bayes_opt/target_space.py
TargetSpace.res
python
def res(self):
    """Get all target values found and corresponding parameters."""
    params = [dict(zip(self.keys, p)) for p in self.params]

    return [
        {"target": target, "params": param}
        for target, param in zip(self.target, params)
    ]
[ "def", "res", "(", "self", ")", ":", "params", "=", "[", "dict", "(", "zip", "(", "self", ".", "keys", ",", "p", ")", ")", "for", "p", "in", "self", ".", "params", "]", "return", "[", "{", "\"target\"", ":", "target", ",", "\"params\"", ":", "param", "}", "for", "target", ",", "param", "in", "zip", "(", "self", ".", "target", ",", "params", ")", "]" ]
Get all target values found and corresponding parametes.
[ "Get", "all", "target", "values", "found", "and", "corresponding", "parametes", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L234-L241
train
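max and res are the two read-outs: one dict for the best observation, one list of dicts for the full history. Continuing the TargetSpace sketch above (and assuming, as these records suggest, that both are plain methods on TargetSpace):

print(space.max())  # {'target': 1.0, 'params': {'p1': 0.0, 'p2': 0.0}}
print(space.res())  # a one-element list holding the same record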
fmfn/BayesianOptimization
bayes_opt/target_space.py
TargetSpace.set_bounds
python
def set_bounds(self, new_bounds):
    """
    A method that allows changing the lower and upper search bounds

    Parameters
    ----------
    new_bounds : dict
        A dictionary with the parameter name and its new bounds
    """
    for row, key in enumerate(self.keys):
        if key in new_bounds:
            self._bounds[row] = new_bounds[key]
[ "def", "set_bounds", "(", "self", ",", "new_bounds", ")", ":", "for", "row", ",", "key", "in", "enumerate", "(", "self", ".", "keys", ")", ":", "if", "key", "in", "new_bounds", ":", "self", ".", "_bounds", "[", "row", "]", "=", "new_bounds", "[", "key", "]" ]
A method that allows changing the lower and upper searching bounds Parameters ---------- new_bounds : dict A dictionary with the parameter name and its new bounds
[ "A", "method", "that", "allows", "changing", "the", "lower", "and", "upper", "searching", "bounds" ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L243-L254
train
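set_bounds makes it possible to shrink or shift the search region mid-run, for example to zoom in on a promising area. Continuing the same hypothetical space:

# Narrow p2's range; p1 is untouched because it is absent from new_bounds.
space.set_bounds(new_bounds={'p2': (1, 10)})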
fmfn/BayesianOptimization
examples/sklearn_example.py
get_data
python
def get_data():
    """Synthetic binary classification dataset."""
    data, targets = make_classification(
        n_samples=1000,
        n_features=45,
        n_informative=12,
        n_redundant=7,
        random_state=134985745,
    )
    return data, targets
[ "def", "get_data", "(", ")", ":", "data", ",", "targets", "=", "make_classification", "(", "n_samples", "=", "1000", ",", "n_features", "=", "45", ",", "n_informative", "=", "12", ",", "n_redundant", "=", "7", ",", "random_state", "=", "134985745", ",", ")", "return", "data", ",", "targets" ]
Synthetic binary classification dataset.
[ "Synthetic", "binary", "classification", "dataset", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L9-L18
train
fmfn/BayesianOptimization
examples/sklearn_example.py
svc_cv
python
def svc_cv(C, gamma, data, targets):
    """SVC cross validation.

    This function will instantiate an SVC classifier with parameters C and
    gamma. Combined with data and targets this will in turn be used to perform
    cross validation. The result of cross validation is returned.

    Our goal is to find combinations of C and gamma that maximize the
    roc_auc metric.
    """
    estimator = SVC(C=C, gamma=gamma, random_state=2)
    cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4)
    return cval.mean()
[ "def", "svc_cv", "(", "C", ",", "gamma", ",", "data", ",", "targets", ")", ":", "estimator", "=", "SVC", "(", "C", "=", "C", ",", "gamma", "=", "gamma", ",", "random_state", "=", "2", ")", "cval", "=", "cross_val_score", "(", "estimator", ",", "data", ",", "targets", ",", "scoring", "=", "'roc_auc'", ",", "cv", "=", "4", ")", "return", "cval", ".", "mean", "(", ")" ]
SVC cross validation. This function will instantiate a SVC classifier with parameters C and gamma. Combined with data and targets this will in turn be used to perform cross validation. The result of cross validation is returned. Our goal is to find combinations of C and gamma that maximizes the roc_auc metric.
[ "SVC", "cross", "validation", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L21-L33
train
fmfn/BayesianOptimization
examples/sklearn_example.py
rfc_cv
python
def rfc_cv(n_estimators, min_samples_split, max_features, data, targets):
    """Random Forest cross validation.

    This function will instantiate a random forest classifier with parameters
    n_estimators, min_samples_split, and max_features. Combined with data and
    targets this will in turn be used to perform cross validation. The result
    of cross validation is returned.

    Our goal is to find combinations of n_estimators, min_samples_split, and
    max_features that minimize the log loss.
    """
    estimator = RFC(
        n_estimators=n_estimators,
        min_samples_split=min_samples_split,
        max_features=max_features,
        random_state=2
    )
    cval = cross_val_score(estimator, data, targets,
                           scoring='neg_log_loss', cv=4)
    return cval.mean()
[ "def", "rfc_cv", "(", "n_estimators", ",", "min_samples_split", ",", "max_features", ",", "data", ",", "targets", ")", ":", "estimator", "=", "RFC", "(", "n_estimators", "=", "n_estimators", ",", "min_samples_split", "=", "min_samples_split", ",", "max_features", "=", "max_features", ",", "random_state", "=", "2", ")", "cval", "=", "cross_val_score", "(", "estimator", ",", "data", ",", "targets", ",", "scoring", "=", "'neg_log_loss'", ",", "cv", "=", "4", ")", "return", "cval", ".", "mean", "(", ")" ]
Random Forest cross validation. This function will instantiate a random forest classifier with parameters n_estimators, min_samples_split, and max_features. Combined with data and targets this will in turn be used to perform cross validation. The result of cross validation is returned. Our goal is to find combinations of n_estimators, min_samples_split, and max_features that minimzes the log loss.
[ "Random", "Forest", "cross", "validation", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L36-L55
train
fmfn/BayesianOptimization
examples/sklearn_example.py
optimize_svc
python
def optimize_svc(data, targets):
    """Apply Bayesian Optimization to SVC parameters."""
    def svc_crossval(expC, expGamma):
        """Wrapper of SVC cross validation.

        Notice how we transform between regular and log scale. While this
        is not technically necessary, it greatly improves the performance
        of the optimizer.
        """
        C = 10 ** expC
        gamma = 10 ** expGamma
        return svc_cv(C=C, gamma=gamma, data=data, targets=targets)

    optimizer = BayesianOptimization(
        f=svc_crossval,
        pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
        random_state=1234,
        verbose=2
    )
    optimizer.maximize(n_iter=10)

    print("Final result:", optimizer.max)
[ "def", "optimize_svc", "(", "data", ",", "targets", ")", ":", "def", "svc_crossval", "(", "expC", ",", "expGamma", ")", ":", "\"\"\"Wrapper of SVC cross validation.\n\n Notice how we transform between regular and log scale. While this\n is not technically necessary, it greatly improves the performance\n of the optimizer.\n \"\"\"", "C", "=", "10", "**", "expC", "gamma", "=", "10", "**", "expGamma", "return", "svc_cv", "(", "C", "=", "C", ",", "gamma", "=", "gamma", ",", "data", "=", "data", ",", "targets", "=", "targets", ")", "optimizer", "=", "BayesianOptimization", "(", "f", "=", "svc_crossval", ",", "pbounds", "=", "{", "\"expC\"", ":", "(", "-", "3", ",", "2", ")", ",", "\"expGamma\"", ":", "(", "-", "4", ",", "-", "1", ")", "}", ",", "random_state", "=", "1234", ",", "verbose", "=", "2", ")", "optimizer", ".", "maximize", "(", "n_iter", "=", "10", ")", "print", "(", "\"Final result:\"", ",", "optimizer", ".", "max", ")" ]
Apply Bayesian Optimization to SVC parameters.
[ "Apply", "Bayesian", "Optimization", "to", "SVC", "parameters", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L58-L79
train
fmfn/BayesianOptimization
examples/sklearn_example.py
optimize_rfc
python
def optimize_rfc(data, targets):
    """Apply Bayesian Optimization to Random Forest parameters."""
    def rfc_crossval(n_estimators, min_samples_split, max_features):
        """Wrapper of RandomForest cross validation.

        Notice how we ensure n_estimators and min_samples_split are cast
        to integer before we pass them along. Moreover, to avoid max_features
        taking values outside the (0, 1) range, we also ensure it is capped
        accordingly.
        """
        return rfc_cv(
            n_estimators=int(n_estimators),
            min_samples_split=int(min_samples_split),
            max_features=max(min(max_features, 0.999), 1e-3),
            data=data,
            targets=targets,
        )

    optimizer = BayesianOptimization(
        f=rfc_crossval,
        pbounds={
            "n_estimators": (10, 250),
            "min_samples_split": (2, 25),
            "max_features": (0.1, 0.999),
        },
        random_state=1234,
        verbose=2
    )
    optimizer.maximize(n_iter=10)

    print("Final result:", optimizer.max)
[ "def", "optimize_rfc", "(", "data", ",", "targets", ")", ":", "def", "rfc_crossval", "(", "n_estimators", ",", "min_samples_split", ",", "max_features", ")", ":", "\"\"\"Wrapper of RandomForest cross validation.\n\n Notice how we ensure n_estimators and min_samples_split are casted\n to integer before we pass them along. Moreover, to avoid max_features\n taking values outside the (0, 1) range, we also ensure it is capped\n accordingly.\n \"\"\"", "return", "rfc_cv", "(", "n_estimators", "=", "int", "(", "n_estimators", ")", ",", "min_samples_split", "=", "int", "(", "min_samples_split", ")", ",", "max_features", "=", "max", "(", "min", "(", "max_features", ",", "0.999", ")", ",", "1e-3", ")", ",", "data", "=", "data", ",", "targets", "=", "targets", ",", ")", "optimizer", "=", "BayesianOptimization", "(", "f", "=", "rfc_crossval", ",", "pbounds", "=", "{", "\"n_estimators\"", ":", "(", "10", ",", "250", ")", ",", "\"min_samples_split\"", ":", "(", "2", ",", "25", ")", ",", "\"max_features\"", ":", "(", "0.1", ",", "0.999", ")", ",", "}", ",", "random_state", "=", "1234", ",", "verbose", "=", "2", ")", "optimizer", ".", "maximize", "(", "n_iter", "=", "10", ")", "print", "(", "\"Final result:\"", ",", "optimizer", ".", "max", ")" ]
Apply Bayesian Optimization to Random Forest parameters.
[ "Apply", "Bayesian", "Optimization", "to", "Random", "Forest", "parameters", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L82-L112
train
fmfn/BayesianOptimization
bayes_opt/util.py
acq_max
python
def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):
    """
    A function to find the maximum of the acquisition function

    It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
    optimization method. First by sampling `n_warmup` (1e5) points at random,
    and then running L-BFGS-B from `n_iter` (250) random starting points.

    Parameters
    ----------
    :param ac:
        The acquisition function object that returns its point-wise value.

    :param gp:
        A gaussian process fitted to the relevant data.

    :param y_max:
        The current maximum known value of the target function.

    :param bounds:
        The variable bounds to limit the search of the acq max.

    :param random_state:
        instance of np.RandomState random number generator

    :param n_warmup:
        number of times to randomly sample the acquisition function

    :param n_iter:
        number of times to run scipy.minimize

    Returns
    -------
    :return: x_max, The arg max of the acquisition function.
    """
    # Warm up with random points
    x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(n_warmup, bounds.shape[0]))
    ys = ac(x_tries, gp=gp, y_max=y_max)
    x_max = x_tries[ys.argmax()]
    max_acq = ys.max()

    # Explore the parameter space more thoroughly
    x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(n_iter, bounds.shape[0]))
    for x_try in x_seeds:
        # Find the minimum of minus the acquisition function
        res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
                       x_try.reshape(1, -1),
                       bounds=bounds,
                       method="L-BFGS-B")

        # See if success
        if not res.success:
            continue

        # Store it if better than previous minimum (maximum).
        if max_acq is None or -res.fun[0] >= max_acq:
            x_max = res.x
            max_acq = -res.fun[0]

    # Clip output to make sure it lies within the bounds. Due to floating
    # point technicalities this is not always the case.
    return np.clip(x_max, bounds[:, 0], bounds[:, 1])
[ "def", "acq_max", "(", "ac", ",", "gp", ",", "y_max", ",", "bounds", ",", "random_state", ",", "n_warmup", "=", "100000", ",", "n_iter", "=", "250", ")", ":", "# Warm up with random points", "x_tries", "=", "random_state", ".", "uniform", "(", "bounds", "[", ":", ",", "0", "]", ",", "bounds", "[", ":", ",", "1", "]", ",", "size", "=", "(", "n_warmup", ",", "bounds", ".", "shape", "[", "0", "]", ")", ")", "ys", "=", "ac", "(", "x_tries", ",", "gp", "=", "gp", ",", "y_max", "=", "y_max", ")", "x_max", "=", "x_tries", "[", "ys", ".", "argmax", "(", ")", "]", "max_acq", "=", "ys", ".", "max", "(", ")", "# Explore the parameter space more throughly", "x_seeds", "=", "random_state", ".", "uniform", "(", "bounds", "[", ":", ",", "0", "]", ",", "bounds", "[", ":", ",", "1", "]", ",", "size", "=", "(", "n_iter", ",", "bounds", ".", "shape", "[", "0", "]", ")", ")", "for", "x_try", "in", "x_seeds", ":", "# Find the minimum of minus the acquisition function", "res", "=", "minimize", "(", "lambda", "x", ":", "-", "ac", "(", "x", ".", "reshape", "(", "1", ",", "-", "1", ")", ",", "gp", "=", "gp", ",", "y_max", "=", "y_max", ")", ",", "x_try", ".", "reshape", "(", "1", ",", "-", "1", ")", ",", "bounds", "=", "bounds", ",", "method", "=", "\"L-BFGS-B\"", ")", "# See if success", "if", "not", "res", ".", "success", ":", "continue", "# Store it if better than previous minimum(maximum).", "if", "max_acq", "is", "None", "or", "-", "res", ".", "fun", "[", "0", "]", ">=", "max_acq", ":", "x_max", "=", "res", ".", "x", "max_acq", "=", "-", "res", ".", "fun", "[", "0", "]", "# Clip output to make sure it lies within the bounds. Due to floating", "# point technicalities this is not always the case.", "return", "np", ".", "clip", "(", "x_max", ",", "bounds", "[", ":", ",", "0", "]", ",", "bounds", "[", ":", ",", "1", "]", ")" ]
A function to find the maximum of the acquisition function It uses a combination of random sampling (cheap) and the 'L-BFGS-B' optimization method. First by sampling `n_warmup` (1e5) points at random, and then running L-BFGS-B from `n_iter` (250) random starting points. Parameters ---------- :param ac: The acquisition function object that return its point-wise value. :param gp: A gaussian process fitted to the relevant data. :param y_max: The current maximum known value of the target function. :param bounds: The variables bounds to limit the search of the acq max. :param random_state: instance of np.RandomState random number generator :param n_warmup: number of times to randomly sample the aquisition function :param n_iter: number of times to run scipy.minimize Returns ------- :return: x_max, The arg max of the acquisition function.
[ "A", "function", "to", "find", "the", "maximum", "of", "the", "acquisition", "function" ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/util.py#L7-L71
train
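acq_max can be exercised on its own against any fitted scikit-learn GP. A self-contained sketch with toy data; the default kernel and the deliberately tiny n_warmup/n_iter values just keep it cheap, and UtilityFunction is assumed to live in bayes_opt.util alongside acq_max:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from bayes_opt.util import UtilityFunction, acq_max

X = np.array([[0.0], [0.5], [1.0]])
y = np.array([0.0, 0.8, 0.1])
gp = GaussianProcessRegressor().fit(X, y)

util = UtilityFunction(kind='ucb', kappa=2.576, xi=0.0)
x_next = acq_max(
    ac=util.utility,
    gp=gp,
    y_max=y.max(),
    bounds=np.array([[0.0, 1.0]]),
    random_state=np.random.RandomState(42),
    n_warmup=1000,
    n_iter=10,
)
print(x_next)  # somewhere near the observed peak at x = 0.5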
fmfn/BayesianOptimization
bayes_opt/util.py
load_logs
python
def load_logs(optimizer, logs):
    """Load previous ...

    """
    import json

    if isinstance(logs, str):
        logs = [logs]

    for log in logs:
        with open(log, "r") as j:
            while True:
                try:
                    iteration = next(j)
                except StopIteration:
                    break

                iteration = json.loads(iteration)
                try:
                    optimizer.register(
                        params=iteration["params"],
                        target=iteration["target"],
                    )
                except KeyError:
                    pass

    return optimizer
[ "def", "load_logs", "(", "optimizer", ",", "logs", ")", ":", "import", "json", "if", "isinstance", "(", "logs", ",", "str", ")", ":", "logs", "=", "[", "logs", "]", "for", "log", "in", "logs", ":", "with", "open", "(", "log", ",", "\"r\"", ")", "as", "j", ":", "while", "True", ":", "try", ":", "iteration", "=", "next", "(", "j", ")", "except", "StopIteration", ":", "break", "iteration", "=", "json", ".", "loads", "(", "iteration", ")", "try", ":", "optimizer", ".", "register", "(", "params", "=", "iteration", "[", "\"params\"", "]", ",", "target", "=", "iteration", "[", "\"target\"", "]", ",", ")", "except", "KeyError", ":", "pass", "return", "optimizer" ]
Load previous ...
[ "Load", "previous", "..." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/util.py#L130-L156
train
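load_logs expects newline-delimited JSON, one object per line with "params" and "target" keys; that format is inferred from the function body above, not from separate documentation. A round-trip sketch with a hand-written log (path and values illustrative):

import json
from bayes_opt import BayesianOptimization
from bayes_opt.util import load_logs

# Write a one-line log in the shape load_logs parses.
with open('previous_run.json', 'w') as f:
    f.write(json.dumps({'params': {'x': 0.5}, 'target': 0.75}) + '\n')

optimizer = BayesianOptimization(
    f=lambda x: -x ** 2,
    pbounds={'x': (-2, 2)},
    random_state=7,
)
load_logs(optimizer, logs=['previous_run.json'])
print(optimizer.max)  # the replayed observation is now known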
fmfn/BayesianOptimization
bayes_opt/util.py
ensure_rng
python
def ensure_rng(random_state=None):
    """
    Creates a random number generator based on an optional seed. This can be
    an integer or another random state for a seeded rng, or None for an
    unseeded rng.
    """
    if random_state is None:
        random_state = np.random.RandomState()
    elif isinstance(random_state, int):
        random_state = np.random.RandomState(random_state)
    else:
        assert isinstance(random_state, np.random.RandomState)
    return random_state
[ "def", "ensure_rng", "(", "random_state", "=", "None", ")", ":", "if", "random_state", "is", "None", ":", "random_state", "=", "np", ".", "random", ".", "RandomState", "(", ")", "elif", "isinstance", "(", "random_state", ",", "int", ")", ":", "random_state", "=", "np", ".", "random", ".", "RandomState", "(", "random_state", ")", "else", ":", "assert", "isinstance", "(", "random_state", ",", "np", ".", "random", ".", "RandomState", ")", "return", "random_state" ]
Creates a random number generator based on an optional seed. This can be an integer or another random state for a seeded rng, or None for an unseeded rng.
[ "Creates", "a", "random", "number", "generator", "based", "on", "an", "optional", "seed", ".", "This", "can", "be", "an", "integer", "or", "another", "random", "state", "for", "a", "seeded", "rng", "or", "None", "for", "an", "unseeded", "rng", "." ]
8ce2292895137477963cf1bafa4e71fa20b2ce49
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/util.py#L159-L171
train
audreyr/cookiecutter
cookiecutter/repository.py
expand_abbreviations
python
def expand_abbreviations(template, abbreviations):
    """Expand abbreviations in a template name.

    :param template: The project template name.
    :param abbreviations: Abbreviation definitions.
    """
    if template in abbreviations:
        return abbreviations[template]

    # Split on colon. If there is no colon, rest will be empty
    # and prefix will be the whole template
    prefix, sep, rest = template.partition(':')
    if prefix in abbreviations:
        return abbreviations[prefix].format(rest)

    return template
[ "def", "expand_abbreviations", "(", "template", ",", "abbreviations", ")", ":", "if", "template", "in", "abbreviations", ":", "return", "abbreviations", "[", "template", "]", "# Split on colon. If there is no colon, rest will be empty", "# and prefix will be the whole template", "prefix", ",", "sep", ",", "rest", "=", "template", ".", "partition", "(", "':'", ")", "if", "prefix", "in", "abbreviations", ":", "return", "abbreviations", "[", "prefix", "]", ".", "format", "(", "rest", ")", "return", "template" ]
Expand abbreviations in a template name. :param template: The project template name. :param abbreviations: Abbreviation definitions.
[ "Expand", "abbreviations", "in", "a", "template", "name", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/repository.py#L32-L47
train
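Anything after the first colon is substituted into the abbreviation via str.format. A small sketch; the 'gh' mapping is the conventional GitHub shorthand, written out explicitly here rather than imported from cookiecutter's built-in config:

from cookiecutter.repository import expand_abbreviations

abbreviations = {'gh': 'https://github.com/{0}.git'}

print(expand_abbreviations('gh:audreyr/cookiecutter-pypackage', abbreviations))
# https://github.com/audreyr/cookiecutter-pypackage.git
print(expand_abbreviations('./my-local-template', abbreviations))
# ./my-local-template (no abbreviation matched, returned unchanged)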
audreyr/cookiecutter
cookiecutter/repository.py
repository_has_cookiecutter_json
python
def repository_has_cookiecutter_json(repo_directory):
    """Determine if `repo_directory` contains a `cookiecutter.json` file.

    :param repo_directory: The candidate repository directory.
    :return: True if the `repo_directory` is valid, else False.
    """
    repo_directory_exists = os.path.isdir(repo_directory)

    repo_config_exists = os.path.isfile(
        os.path.join(repo_directory, 'cookiecutter.json')
    )
    return repo_directory_exists and repo_config_exists
[ "def", "repository_has_cookiecutter_json", "(", "repo_directory", ")", ":", "repo_directory_exists", "=", "os", ".", "path", ".", "isdir", "(", "repo_directory", ")", "repo_config_exists", "=", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "repo_directory", ",", "'cookiecutter.json'", ")", ")", "return", "repo_directory_exists", "and", "repo_config_exists" ]
Determine if `repo_directory` contains a `cookiecutter.json` file. :param repo_directory: The candidate repository directory. :return: True if the `repo_directory` is valid, else False.
[ "Determine", "if", "repo_directory", "contains", "a", "cookiecutter", ".", "json", "file", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/repository.py#L50-L61
train
audreyr/cookiecutter
cookiecutter/repository.py
determine_repo_dir
python
def determine_repo_dir(template, abbreviations, clone_to_dir, checkout,
                       no_input, password=None):
    """
    Locate the repository directory from a template reference.

    Applies repository abbreviations to the template reference.
    If the template refers to a repository URL, clone it.
    If the template is a path to a local repository, use it.

    :param template: A directory containing a project template directory,
        or a URL to a git repository.
    :param abbreviations: A dictionary of repository abbreviation
        definitions.
    :param clone_to_dir: The directory to clone the repository into.
    :param checkout: The branch, tag or commit ID to checkout after clone.
    :param no_input: Prompt the user at command line for manual
        configuration?
    :param password: The password to use when extracting the repository.
    :return: A tuple containing the cookiecutter template directory, and
        a boolean describing whether that directory should be cleaned up
        after the template has been instantiated.
    :raises: `RepositoryNotFound` if a repository directory could not be
        found.
    """
    template = expand_abbreviations(template, abbreviations)

    if is_zip_file(template):
        unzipped_dir = unzip(
            zip_uri=template,
            is_url=is_repo_url(template),
            clone_to_dir=clone_to_dir,
            no_input=no_input,
            password=password
        )
        repository_candidates = [unzipped_dir]
        cleanup = True
    elif is_repo_url(template):
        cloned_repo = clone(
            repo_url=template,
            checkout=checkout,
            clone_to_dir=clone_to_dir,
            no_input=no_input,
        )
        repository_candidates = [cloned_repo]
        cleanup = False
    else:
        repository_candidates = [
            template,
            os.path.join(clone_to_dir, template)
        ]
        cleanup = False

    for repo_candidate in repository_candidates:
        if repository_has_cookiecutter_json(repo_candidate):
            return repo_candidate, cleanup

    raise RepositoryNotFound(
        'A valid repository for "{}" could not be found in the following '
        'locations:\n{}'.format(
            template,
            '\n'.join(repository_candidates)
        )
    )
[ "def", "determine_repo_dir", "(", "template", ",", "abbreviations", ",", "clone_to_dir", ",", "checkout", ",", "no_input", ",", "password", "=", "None", ")", ":", "template", "=", "expand_abbreviations", "(", "template", ",", "abbreviations", ")", "if", "is_zip_file", "(", "template", ")", ":", "unzipped_dir", "=", "unzip", "(", "zip_uri", "=", "template", ",", "is_url", "=", "is_repo_url", "(", "template", ")", ",", "clone_to_dir", "=", "clone_to_dir", ",", "no_input", "=", "no_input", ",", "password", "=", "password", ")", "repository_candidates", "=", "[", "unzipped_dir", "]", "cleanup", "=", "True", "elif", "is_repo_url", "(", "template", ")", ":", "cloned_repo", "=", "clone", "(", "repo_url", "=", "template", ",", "checkout", "=", "checkout", ",", "clone_to_dir", "=", "clone_to_dir", ",", "no_input", "=", "no_input", ",", ")", "repository_candidates", "=", "[", "cloned_repo", "]", "cleanup", "=", "False", "else", ":", "repository_candidates", "=", "[", "template", ",", "os", ".", "path", ".", "join", "(", "clone_to_dir", ",", "template", ")", "]", "cleanup", "=", "False", "for", "repo_candidate", "in", "repository_candidates", ":", "if", "repository_has_cookiecutter_json", "(", "repo_candidate", ")", ":", "return", "repo_candidate", ",", "cleanup", "raise", "RepositoryNotFound", "(", "'A valid repository for \"{}\" could not be found in the following '", "'locations:\\n{}'", ".", "format", "(", "template", ",", "'\\n'", ".", "join", "(", "repository_candidates", ")", ")", ")" ]
Locate the repository directory from a template reference. Applies repository abbreviations to the template reference. If the template refers to a repository URL, clone it. If the template is a path to a local repository, use it. :param template: A directory containing a project template directory, or a URL to a git repository. :param abbreviations: A dictionary of repository abbreviation definitions. :param clone_to_dir: The directory to clone the repository into. :param checkout: The branch, tag or commit ID to checkout after clone. :param no_input: Prompt the user at command line for manual configuration? :param password: The password to use when extracting the repository. :return: A tuple containing the cookiecutter template directory, and a boolean descriving whether that directory should be cleaned up after the template has been instantiated. :raises: `RepositoryNotFound` if a repository directory could not be found.
[ "Locate", "the", "repository", "directory", "from", "a", "template", "reference", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/repository.py#L64-L124
train
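A hedged sketch of calling determine_repo_dir directly; the template reference and clone directory are illustrative, and in normal use cookiecutter's main entry point fills these arguments in from user config:

from cookiecutter.repository import determine_repo_dir

repo_dir, cleanup = determine_repo_dir(
    template='gh:audreyr/cookiecutter-pypackage',
    abbreviations={'gh': 'https://github.com/{0}.git'},
    clone_to_dir='/tmp/cookiecutters',
    checkout=None,
    no_input=True,
)
print(repo_dir, cleanup)  # cleanup is True only for unzipped templates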
audreyr/cookiecutter
cookiecutter/find.py
find_template
python
def find_template(repo_dir):
    """Determine which child directory of `repo_dir` is the project template.

    :param repo_dir: Local directory of newly cloned repo.
    :returns project_template: Relative path to project template.
    """
    logger.debug('Searching {} for the project template.'.format(repo_dir))

    repo_dir_contents = os.listdir(repo_dir)

    project_template = None
    for item in repo_dir_contents:
        if 'cookiecutter' in item and '{{' in item and '}}' in item:
            project_template = item
            break

    if project_template:
        project_template = os.path.join(repo_dir, project_template)
        logger.debug(
            'The project template appears to be {}'.format(project_template)
        )
        return project_template
    else:
        raise NonTemplatedInputDirException
[ "def", "find_template", "(", "repo_dir", ")", ":", "logger", ".", "debug", "(", "'Searching {} for the project template.'", ".", "format", "(", "repo_dir", ")", ")", "repo_dir_contents", "=", "os", ".", "listdir", "(", "repo_dir", ")", "project_template", "=", "None", "for", "item", "in", "repo_dir_contents", ":", "if", "'cookiecutter'", "in", "item", "and", "'{{'", "in", "item", "and", "'}}'", "in", "item", ":", "project_template", "=", "item", "break", "if", "project_template", ":", "project_template", "=", "os", ".", "path", ".", "join", "(", "repo_dir", ",", "project_template", ")", "logger", ".", "debug", "(", "'The project template appears to be {}'", ".", "format", "(", "project_template", ")", ")", "return", "project_template", "else", ":", "raise", "NonTemplatedInputDirException" ]
Determine which child directory of `repo_dir` is the project template. :param repo_dir: Local directory of newly cloned repo. :returns project_template: Relative path to project template.
[ "Determine", "which", "child", "directory", "of", "repo_dir", "is", "the", "project", "template", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/find.py#L13-L36
train
audreyr/cookiecutter
cookiecutter/generate.py
is_copy_only_path
python
def is_copy_only_path(path, context):
    """Check whether the given `path` should only be copied and not rendered.

    Returns True if `path` matches a pattern in the given `context` dict,
    otherwise False.

    :param path: A file-system path referring to a file or dir that
        should be rendered or just copied.
    :param context: cookiecutter context.
    """
    try:
        for dont_render in context['cookiecutter']['_copy_without_render']:
            if fnmatch.fnmatch(path, dont_render):
                return True
    except KeyError:
        return False

    return False
[ "def", "is_copy_only_path", "(", "path", ",", "context", ")", ":", "try", ":", "for", "dont_render", "in", "context", "[", "'cookiecutter'", "]", "[", "'_copy_without_render'", "]", ":", "if", "fnmatch", ".", "fnmatch", "(", "path", ",", "dont_render", ")", ":", "return", "True", "except", "KeyError", ":", "return", "False", "return", "False" ]
Check whether the given `path` should only be copied and not rendered. Returns True if `path` matches a pattern in the given `context` dict, otherwise False. :param path: A file-system path referring to a file or dir that should be rendered or just copied. :param context: cookiecutter context.
[ "Check", "whether", "the", "given", "path", "should", "only", "be", "copied", "and", "not", "rendered", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/generate.py#L33-L50
train
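A self-contained sketch of is_copy_only_path; note that matching uses fnmatch, so shell-style globs apply to the whole path:

from cookiecutter.generate import is_copy_only_path

context = {
    'cookiecutter': {
        '_copy_without_render': ['*.html', 'static/*'],
    }
}

print(is_copy_only_path('templates/base.html', context))  # True, matches '*.html'
print(is_copy_only_path('README.md', context))            # False
# A context without '_copy_without_render' simply yields False.
print(is_copy_only_path('README.md', {'cookiecutter': {}}))  # False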
audreyr/cookiecutter
cookiecutter/generate.py
apply_overwrites_to_context
def apply_overwrites_to_context(context, overwrite_context): """Modify the given context in place based on the overwrite_context.""" for variable, overwrite in overwrite_context.items(): if variable not in context: # Do not include variables which are not used in the template continue context_value = context[variable] if isinstance(context_value, list): # We are dealing with a choice variable if overwrite in context_value: # This overwrite is actually valid for the given context # Let's set it as default (by definition first item in list) # see ``cookiecutter.prompt.prompt_choice_for_config`` context_value.remove(overwrite) context_value.insert(0, overwrite) else: # Simply overwrite the value for this variable context[variable] = overwrite
python
def apply_overwrites_to_context(context, overwrite_context): """Modify the given context in place based on the overwrite_context.""" for variable, overwrite in overwrite_context.items(): if variable not in context: # Do not include variables which are not used in the template continue context_value = context[variable] if isinstance(context_value, list): # We are dealing with a choice variable if overwrite in context_value: # This overwrite is actually valid for the given context # Let's set it as default (by definition first item in list) # see ``cookiecutter.prompt.prompt_choice_for_config`` context_value.remove(overwrite) context_value.insert(0, overwrite) else: # Simply overwrite the value for this variable context[variable] = overwrite
[ "def", "apply_overwrites_to_context", "(", "context", ",", "overwrite_context", ")", ":", "for", "variable", ",", "overwrite", "in", "overwrite_context", ".", "items", "(", ")", ":", "if", "variable", "not", "in", "context", ":", "# Do not include variables which are not used in the template", "continue", "context_value", "=", "context", "[", "variable", "]", "if", "isinstance", "(", "context_value", ",", "list", ")", ":", "# We are dealing with a choice variable", "if", "overwrite", "in", "context_value", ":", "# This overwrite is actually valid for the given context", "# Let's set it as default (by definition first item in list)", "# see ``cookiecutter.prompt.prompt_choice_for_config``", "context_value", ".", "remove", "(", "overwrite", ")", "context_value", ".", "insert", "(", "0", ",", "overwrite", ")", "else", ":", "# Simply overwrite the value for this variable", "context", "[", "variable", "]", "=", "overwrite" ]
Modify the given context in place based on the overwrite_context.
[ "Modify", "the", "given", "context", "in", "place", "based", "on", "the", "overwrite_context", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/generate.py#L53-L72
train
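A self-contained sketch of apply_overwrites_to_context showing both the choice-variable reordering and the plain overwrite path:

from cookiecutter.generate import apply_overwrites_to_context

context = {
    'project_name': 'My Project',
    'license': ['MIT', 'BSD-3', 'GPLv3'],  # choice variable
}
apply_overwrites_to_context(context, {
    'license': 'GPLv3',      # valid choice: moved to the front as the default
    'project_name': 'Demo',  # plain variable: replaced outright
    'unknown_var': 'x',      # not in the template context: silently skipped
})
print(context)
# {'project_name': 'Demo', 'license': ['GPLv3', 'MIT', 'BSD-3']}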
audreyr/cookiecutter
cookiecutter/generate.py
generate_context
def generate_context(context_file='cookiecutter.json', default_context=None, extra_context=None): """Generate the context for a Cookiecutter project template. Loads the JSON file as a Python object, with key being the JSON filename. :param context_file: JSON file containing key/value pairs for populating the cookiecutter's variables. :param default_context: Dictionary containing config to take into account. :param extra_context: Dictionary containing configuration overrides """ context = OrderedDict([]) try: with open(context_file) as file_handle: obj = json.load(file_handle, object_pairs_hook=OrderedDict) except ValueError as e: # JSON decoding error. Let's throw a new exception that is more # friendly for the developer or user. full_fpath = os.path.abspath(context_file) json_exc_message = str(e) our_exc_message = ( 'JSON decoding error while loading "{0}". Decoding' ' error details: "{1}"'.format(full_fpath, json_exc_message)) raise ContextDecodingException(our_exc_message) # Add the Python object to the context dictionary file_name = os.path.split(context_file)[1] file_stem = file_name.split('.')[0] context[file_stem] = obj # Overwrite context variable defaults with the default context from the # user's global config, if available if default_context: apply_overwrites_to_context(obj, default_context) if extra_context: apply_overwrites_to_context(obj, extra_context) logger.debug('Context generated is {}'.format(context)) return context
python
def generate_context(context_file='cookiecutter.json', default_context=None, extra_context=None): """Generate the context for a Cookiecutter project template. Loads the JSON file as a Python object, with key being the JSON filename. :param context_file: JSON file containing key/value pairs for populating the cookiecutter's variables. :param default_context: Dictionary containing config to take into account. :param extra_context: Dictionary containing configuration overrides """ context = OrderedDict([]) try: with open(context_file) as file_handle: obj = json.load(file_handle, object_pairs_hook=OrderedDict) except ValueError as e: # JSON decoding error. Let's throw a new exception that is more # friendly for the developer or user. full_fpath = os.path.abspath(context_file) json_exc_message = str(e) our_exc_message = ( 'JSON decoding error while loading "{0}". Decoding' ' error details: "{1}"'.format(full_fpath, json_exc_message)) raise ContextDecodingException(our_exc_message) # Add the Python object to the context dictionary file_name = os.path.split(context_file)[1] file_stem = file_name.split('.')[0] context[file_stem] = obj # Overwrite context variable defaults with the default context from the # user's global config, if available if default_context: apply_overwrites_to_context(obj, default_context) if extra_context: apply_overwrites_to_context(obj, extra_context) logger.debug('Context generated is {}'.format(context)) return context
[ "def", "generate_context", "(", "context_file", "=", "'cookiecutter.json'", ",", "default_context", "=", "None", ",", "extra_context", "=", "None", ")", ":", "context", "=", "OrderedDict", "(", "[", "]", ")", "try", ":", "with", "open", "(", "context_file", ")", "as", "file_handle", ":", "obj", "=", "json", ".", "load", "(", "file_handle", ",", "object_pairs_hook", "=", "OrderedDict", ")", "except", "ValueError", "as", "e", ":", "# JSON decoding error. Let's throw a new exception that is more", "# friendly for the developer or user.", "full_fpath", "=", "os", ".", "path", ".", "abspath", "(", "context_file", ")", "json_exc_message", "=", "str", "(", "e", ")", "our_exc_message", "=", "(", "'JSON decoding error while loading \"{0}\". Decoding'", "' error details: \"{1}\"'", ".", "format", "(", "full_fpath", ",", "json_exc_message", ")", ")", "raise", "ContextDecodingException", "(", "our_exc_message", ")", "# Add the Python object to the context dictionary", "file_name", "=", "os", ".", "path", ".", "split", "(", "context_file", ")", "[", "1", "]", "file_stem", "=", "file_name", ".", "split", "(", "'.'", ")", "[", "0", "]", "context", "[", "file_stem", "]", "=", "obj", "# Overwrite context variable defaults with the default context from the", "# user's global config, if available", "if", "default_context", ":", "apply_overwrites_to_context", "(", "obj", ",", "default_context", ")", "if", "extra_context", ":", "apply_overwrites_to_context", "(", "obj", ",", "extra_context", ")", "logger", ".", "debug", "(", "'Context generated is {}'", ".", "format", "(", "context", ")", ")", "return", "context" ]
Generate the context for a Cookiecutter project template. Loads the JSON file as a Python object, with key being the JSON filename. :param context_file: JSON file containing key/value pairs for populating the cookiecutter's variables. :param default_context: Dictionary containing config to take into account. :param extra_context: Dictionary containing configuration overrides
[ "Generate", "the", "context", "for", "a", "Cookiecutter", "project", "template", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/generate.py#L75-L114
train
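A usage sketch for generate_context, assuming a cookiecutter.json exists in the current directory (its contents below are hypothetical):

from cookiecutter.generate import generate_context

# Assumed file contents:
# {"project_name": "My Project", "license": ["MIT", "BSD-3"]}
context = generate_context(
    context_file='cookiecutter.json',
    default_context={'license': 'BSD-3'},    # e.g. from the user config
    extra_context={'project_name': 'Demo'},  # e.g. from CLI overrides
)
# The loaded object is keyed by the file stem:
print(context['cookiecutter']['project_name'])  # 'Demo'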
audreyr/cookiecutter
cookiecutter/generate.py
generate_file
def generate_file(project_dir, infile, context, env): """Render filename of infile as name of outfile, handle infile correctly. Dealing with infile appropriately: a. If infile is a binary file, copy it over without rendering. b. If infile is a text file, render its contents and write the rendered infile to outfile. Precondition: When calling `generate_file()`, the root template dir must be the current working directory. Using `utils.work_in()` is the recommended way to perform this directory change. :param project_dir: Absolute path to the resulting generated project. :param infile: Input file to generate the file from. Relative to the root template dir. :param context: Dict for populating the cookiecutter's variables. :param env: Jinja2 template execution environment. """ logger.debug('Processing file {}'.format(infile)) # Render the path to the output file (not including the root project dir) outfile_tmpl = env.from_string(infile) outfile = os.path.join(project_dir, outfile_tmpl.render(**context)) file_name_is_empty = os.path.isdir(outfile) if file_name_is_empty: logger.debug('The resulting file name is empty: {0}'.format(outfile)) return logger.debug('Created file at {0}'.format(outfile)) # Just copy over binary files. Don't render. logger.debug("Check {} to see if it's a binary".format(infile)) if is_binary(infile): logger.debug( 'Copying binary {} to {} without rendering' ''.format(infile, outfile) ) shutil.copyfile(infile, outfile) else: # Force fwd slashes on Windows for get_template # This is a by-design Jinja issue infile_fwd_slashes = infile.replace(os.path.sep, '/') # Render the file try: tmpl = env.get_template(infile_fwd_slashes) except TemplateSyntaxError as exception: # Disable translated so that printed exception contains verbose # information about syntax error location exception.translated = False raise rendered_file = tmpl.render(**context) logger.debug('Writing contents to file {}'.format(outfile)) with io.open(outfile, 'w', encoding='utf-8') as fh: fh.write(rendered_file) # Apply file permissions to output file shutil.copymode(infile, outfile)
python
def generate_file(project_dir, infile, context, env): """Render filename of infile as name of outfile, handle infile correctly. Dealing with infile appropriately: a. If infile is a binary file, copy it over without rendering. b. If infile is a text file, render its contents and write the rendered infile to outfile. Precondition: When calling `generate_file()`, the root template dir must be the current working directory. Using `utils.work_in()` is the recommended way to perform this directory change. :param project_dir: Absolute path to the resulting generated project. :param infile: Input file to generate the file from. Relative to the root template dir. :param context: Dict for populating the cookiecutter's variables. :param env: Jinja2 template execution environment. """ logger.debug('Processing file {}'.format(infile)) # Render the path to the output file (not including the root project dir) outfile_tmpl = env.from_string(infile) outfile = os.path.join(project_dir, outfile_tmpl.render(**context)) file_name_is_empty = os.path.isdir(outfile) if file_name_is_empty: logger.debug('The resulting file name is empty: {0}'.format(outfile)) return logger.debug('Created file at {0}'.format(outfile)) # Just copy over binary files. Don't render. logger.debug("Check {} to see if it's a binary".format(infile)) if is_binary(infile): logger.debug( 'Copying binary {} to {} without rendering' ''.format(infile, outfile) ) shutil.copyfile(infile, outfile) else: # Force fwd slashes on Windows for get_template # This is a by-design Jinja issue infile_fwd_slashes = infile.replace(os.path.sep, '/') # Render the file try: tmpl = env.get_template(infile_fwd_slashes) except TemplateSyntaxError as exception: # Disable translated so that printed exception contains verbose # information about syntax error location exception.translated = False raise rendered_file = tmpl.render(**context) logger.debug('Writing contents to file {}'.format(outfile)) with io.open(outfile, 'w', encoding='utf-8') as fh: fh.write(rendered_file) # Apply file permissions to output file shutil.copymode(infile, outfile)
[ "def", "generate_file", "(", "project_dir", ",", "infile", ",", "context", ",", "env", ")", ":", "logger", ".", "debug", "(", "'Processing file {}'", ".", "format", "(", "infile", ")", ")", "# Render the path to the output file (not including the root project dir)", "outfile_tmpl", "=", "env", ".", "from_string", "(", "infile", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "outfile_tmpl", ".", "render", "(", "*", "*", "context", ")", ")", "file_name_is_empty", "=", "os", ".", "path", ".", "isdir", "(", "outfile", ")", "if", "file_name_is_empty", ":", "logger", ".", "debug", "(", "'The resulting file name is empty: {0}'", ".", "format", "(", "outfile", ")", ")", "return", "logger", ".", "debug", "(", "'Created file at {0}'", ".", "format", "(", "outfile", ")", ")", "# Just copy over binary files. Don't render.", "logger", ".", "debug", "(", "\"Check {} to see if it's a binary\"", ".", "format", "(", "infile", ")", ")", "if", "is_binary", "(", "infile", ")", ":", "logger", ".", "debug", "(", "'Copying binary {} to {} without rendering'", "''", ".", "format", "(", "infile", ",", "outfile", ")", ")", "shutil", ".", "copyfile", "(", "infile", ",", "outfile", ")", "else", ":", "# Force fwd slashes on Windows for get_template", "# This is a by-design Jinja issue", "infile_fwd_slashes", "=", "infile", ".", "replace", "(", "os", ".", "path", ".", "sep", ",", "'/'", ")", "# Render the file", "try", ":", "tmpl", "=", "env", ".", "get_template", "(", "infile_fwd_slashes", ")", "except", "TemplateSyntaxError", "as", "exception", ":", "# Disable translated so that printed exception contains verbose", "# information about syntax error location", "exception", ".", "translated", "=", "False", "raise", "rendered_file", "=", "tmpl", ".", "render", "(", "*", "*", "context", ")", "logger", ".", "debug", "(", "'Writing contents to file {}'", ".", "format", "(", "outfile", ")", ")", "with", "io", ".", "open", "(", "outfile", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "fh", ":", "fh", ".", "write", "(", "rendered_file", ")", "# Apply file permissions to output file", "shutil", ".", "copymode", "(", "infile", ",", "outfile", ")" ]
Render filename of infile as name of outfile, handle infile correctly. Dealing with infile appropriately: a. If infile is a binary file, copy it over without rendering. b. If infile is a text file, render its contents and write the rendered infile to outfile. Precondition: When calling `generate_file()`, the root template dir must be the current working directory. Using `utils.work_in()` is the recommended way to perform this directory change. :param project_dir: Absolute path to the resulting generated project. :param infile: Input file to generate the file from. Relative to the root template dir. :param context: Dict for populating the cookiecutter's variables. :param env: Jinja2 template execution environment.
[ "Render", "filename", "of", "infile", "as", "name", "of", "outfile", "handle", "infile", "correctly", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/generate.py#L117-L180
train
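A sketch of calling generate_file directly, mirroring how generate_files drives it; the template and output paths are hypothetical, and the cookiecutter.environment import path for StrictEnvironment is assumed for this release:

import os
from jinja2 import FileSystemLoader
from cookiecutter.environment import StrictEnvironment
from cookiecutter.generate import generate_file
from cookiecutter.utils import work_in

context = {'cookiecutter': {'project_slug': 'demo'}}
template_dir = 'my-template/{{cookiecutter.project_slug}}'  # hypothetical
project_dir = os.path.abspath('demo-out')
os.makedirs(project_dir, exist_ok=True)

env = StrictEnvironment(context=context, keep_trailing_newline=True)

# Precondition: the template root must be the current working directory.
with work_in(template_dir):
    env.loader = FileSystemLoader('.')
    generate_file(project_dir, 'README.md', context, env)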
audreyr/cookiecutter
cookiecutter/generate.py
render_and_create_dir
def render_and_create_dir(dirname, context, output_dir, environment, overwrite_if_exists=False): """Render name of a directory, create the directory, return its path.""" name_tmpl = environment.from_string(dirname) rendered_dirname = name_tmpl.render(**context) dir_to_create = os.path.normpath( os.path.join(output_dir, rendered_dirname) ) logger.debug('Rendered dir {} must exist in output_dir {}'.format( dir_to_create, output_dir )) output_dir_exists = os.path.exists(dir_to_create) if output_dir_exists: if overwrite_if_exists: logger.debug( 'Output directory {} already exists,' 'overwriting it'.format(dir_to_create) ) else: msg = 'Error: "{}" directory already exists'.format(dir_to_create) raise OutputDirExistsException(msg) else: make_sure_path_exists(dir_to_create) return dir_to_create, not output_dir_exists
python
def render_and_create_dir(dirname, context, output_dir, environment, overwrite_if_exists=False): """Render name of a directory, create the directory, return its path.""" name_tmpl = environment.from_string(dirname) rendered_dirname = name_tmpl.render(**context) dir_to_create = os.path.normpath( os.path.join(output_dir, rendered_dirname) ) logger.debug('Rendered dir {} must exist in output_dir {}'.format( dir_to_create, output_dir )) output_dir_exists = os.path.exists(dir_to_create) if output_dir_exists: if overwrite_if_exists: logger.debug( 'Output directory {} already exists,' 'overwriting it'.format(dir_to_create) ) else: msg = 'Error: "{}" directory already exists'.format(dir_to_create) raise OutputDirExistsException(msg) else: make_sure_path_exists(dir_to_create) return dir_to_create, not output_dir_exists
[ "def", "render_and_create_dir", "(", "dirname", ",", "context", ",", "output_dir", ",", "environment", ",", "overwrite_if_exists", "=", "False", ")", ":", "name_tmpl", "=", "environment", ".", "from_string", "(", "dirname", ")", "rendered_dirname", "=", "name_tmpl", ".", "render", "(", "*", "*", "context", ")", "dir_to_create", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "output_dir", ",", "rendered_dirname", ")", ")", "logger", ".", "debug", "(", "'Rendered dir {} must exist in output_dir {}'", ".", "format", "(", "dir_to_create", ",", "output_dir", ")", ")", "output_dir_exists", "=", "os", ".", "path", ".", "exists", "(", "dir_to_create", ")", "if", "output_dir_exists", ":", "if", "overwrite_if_exists", ":", "logger", ".", "debug", "(", "'Output directory {} already exists,'", "'overwriting it'", ".", "format", "(", "dir_to_create", ")", ")", "else", ":", "msg", "=", "'Error: \"{}\" directory already exists'", ".", "format", "(", "dir_to_create", ")", "raise", "OutputDirExistsException", "(", "msg", ")", "else", ":", "make_sure_path_exists", "(", "dir_to_create", ")", "return", "dir_to_create", ",", "not", "output_dir_exists" ]
Render name of a directory, create the directory, return its path.
[ "Render", "name", "of", "a", "directory", "create", "the", "directory", "return", "its", "path", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/generate.py#L183-L212
train
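A sketch of render_and_create_dir; a plain jinja2 Environment stands in here for the StrictEnvironment that cookiecutter builds internally:

from jinja2 import Environment
from cookiecutter.generate import render_and_create_dir

context = {'cookiecutter': {'project_slug': 'demo'}}
dir_to_create, created = render_and_create_dir(
    '{{cookiecutter.project_slug}}',
    context,
    output_dir='.',
    environment=Environment(),
    overwrite_if_exists=True,
)
print(dir_to_create, created)  # 'demo' True on the first run, False afterwards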
audreyr/cookiecutter
cookiecutter/generate.py
_run_hook_from_repo_dir
def _run_hook_from_repo_dir(repo_dir, hook_name, project_dir, context, delete_project_on_failure): """Run hook from repo directory, clean project directory if hook fails. :param repo_dir: Project template input directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. :param delete_project_on_failure: Delete the project directory on hook failure? """ with work_in(repo_dir): try: run_hook(hook_name, project_dir, context) except FailedHookException: if delete_project_on_failure: rmtree(project_dir) logger.error( "Stopping generation because {} hook " "script didn't exit successfully".format(hook_name) ) raise
python
def _run_hook_from_repo_dir(repo_dir, hook_name, project_dir, context, delete_project_on_failure): """Run hook from repo directory, clean project directory if hook fails. :param repo_dir: Project template input directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. :param delete_project_on_failure: Delete the project directory on hook failure? """ with work_in(repo_dir): try: run_hook(hook_name, project_dir, context) except FailedHookException: if delete_project_on_failure: rmtree(project_dir) logger.error( "Stopping generation because {} hook " "script didn't exit successfully".format(hook_name) ) raise
[ "def", "_run_hook_from_repo_dir", "(", "repo_dir", ",", "hook_name", ",", "project_dir", ",", "context", ",", "delete_project_on_failure", ")", ":", "with", "work_in", "(", "repo_dir", ")", ":", "try", ":", "run_hook", "(", "hook_name", ",", "project_dir", ",", "context", ")", "except", "FailedHookException", ":", "if", "delete_project_on_failure", ":", "rmtree", "(", "project_dir", ")", "logger", ".", "error", "(", "\"Stopping generation because {} hook \"", "\"script didn't exit successfully\"", ".", "format", "(", "hook_name", ")", ")", "raise" ]
Run hook from repo directory, clean project directory if hook fails. :param repo_dir: Project template input directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. :param delete_project_on_failure: Delete the project directory on hook failure?
[ "Run", "hook", "from", "repo", "directory", "clean", "project", "directory", "if", "hook", "fails", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/generate.py#L223-L244
train
audreyr/cookiecutter
cookiecutter/generate.py
generate_files
def generate_files(repo_dir, context=None, output_dir='.', overwrite_if_exists=False): """Render the templates and saves them to files. :param repo_dir: Project template input directory. :param context: Dict for populating the template's variables. :param output_dir: Where to output the generated project dir into. :param overwrite_if_exists: Overwrite the contents of the output directory if it exists. """ template_dir = find_template(repo_dir) logger.debug('Generating project from {}...'.format(template_dir)) context = context or OrderedDict([]) unrendered_dir = os.path.split(template_dir)[1] ensure_dir_is_templated(unrendered_dir) env = StrictEnvironment( context=context, keep_trailing_newline=True, ) try: project_dir, output_directory_created = render_and_create_dir( unrendered_dir, context, output_dir, env, overwrite_if_exists ) except UndefinedError as err: msg = "Unable to create project directory '{}'".format(unrendered_dir) raise UndefinedVariableInTemplate(msg, err, context) # We want the Jinja path and the OS paths to match. Consequently, we'll: # + CD to the template folder # + Set Jinja's path to '.' # # In order to build our files to the correct folder(s), we'll use an # absolute path for the target folder (project_dir) project_dir = os.path.abspath(project_dir) logger.debug('Project directory is {}'.format(project_dir)) # if we created the output directory, then it's ok to remove it # if rendering fails delete_project_on_failure = output_directory_created _run_hook_from_repo_dir( repo_dir, 'pre_gen_project', project_dir, context, delete_project_on_failure ) with work_in(template_dir): env.loader = FileSystemLoader('.') for root, dirs, files in os.walk('.'): # We must separate the two types of dirs into different lists. # The reason is that we don't want ``os.walk`` to go through the # unrendered directories, since they will just be copied. 
copy_dirs = [] render_dirs = [] for d in dirs: d_ = os.path.normpath(os.path.join(root, d)) # We check the full path, because that's how it can be # specified in the ``_copy_without_render`` setting, but # we store just the dir name if is_copy_only_path(d_, context): copy_dirs.append(d) else: render_dirs.append(d) for copy_dir in copy_dirs: indir = os.path.normpath(os.path.join(root, copy_dir)) outdir = os.path.normpath(os.path.join(project_dir, indir)) logger.debug( 'Copying dir {} to {} without rendering' ''.format(indir, outdir) ) shutil.copytree(indir, outdir) # We mutate ``dirs``, because we only want to go through these dirs # recursively dirs[:] = render_dirs for d in dirs: unrendered_dir = os.path.join(project_dir, root, d) try: render_and_create_dir( unrendered_dir, context, output_dir, env, overwrite_if_exists ) except UndefinedError as err: if delete_project_on_failure: rmtree(project_dir) _dir = os.path.relpath(unrendered_dir, output_dir) msg = "Unable to create directory '{}'".format(_dir) raise UndefinedVariableInTemplate(msg, err, context) for f in files: infile = os.path.normpath(os.path.join(root, f)) if is_copy_only_path(infile, context): outfile_tmpl = env.from_string(infile) outfile_rendered = outfile_tmpl.render(**context) outfile = os.path.join(project_dir, outfile_rendered) logger.debug( 'Copying file {} to {} without rendering' ''.format(infile, outfile) ) shutil.copyfile(infile, outfile) shutil.copymode(infile, outfile) continue try: generate_file(project_dir, infile, context, env) except UndefinedError as err: if delete_project_on_failure: rmtree(project_dir) msg = "Unable to create file '{}'".format(infile) raise UndefinedVariableInTemplate(msg, err, context) _run_hook_from_repo_dir( repo_dir, 'post_gen_project', project_dir, context, delete_project_on_failure ) return project_dir
python
def generate_files(repo_dir, context=None, output_dir='.', overwrite_if_exists=False): """Render the templates and saves them to files. :param repo_dir: Project template input directory. :param context: Dict for populating the template's variables. :param output_dir: Where to output the generated project dir into. :param overwrite_if_exists: Overwrite the contents of the output directory if it exists. """ template_dir = find_template(repo_dir) logger.debug('Generating project from {}...'.format(template_dir)) context = context or OrderedDict([]) unrendered_dir = os.path.split(template_dir)[1] ensure_dir_is_templated(unrendered_dir) env = StrictEnvironment( context=context, keep_trailing_newline=True, ) try: project_dir, output_directory_created = render_and_create_dir( unrendered_dir, context, output_dir, env, overwrite_if_exists ) except UndefinedError as err: msg = "Unable to create project directory '{}'".format(unrendered_dir) raise UndefinedVariableInTemplate(msg, err, context) # We want the Jinja path and the OS paths to match. Consequently, we'll: # + CD to the template folder # + Set Jinja's path to '.' # # In order to build our files to the correct folder(s), we'll use an # absolute path for the target folder (project_dir) project_dir = os.path.abspath(project_dir) logger.debug('Project directory is {}'.format(project_dir)) # if we created the output directory, then it's ok to remove it # if rendering fails delete_project_on_failure = output_directory_created _run_hook_from_repo_dir( repo_dir, 'pre_gen_project', project_dir, context, delete_project_on_failure ) with work_in(template_dir): env.loader = FileSystemLoader('.') for root, dirs, files in os.walk('.'): # We must separate the two types of dirs into different lists. # The reason is that we don't want ``os.walk`` to go through the # unrendered directories, since they will just be copied. 
copy_dirs = [] render_dirs = [] for d in dirs: d_ = os.path.normpath(os.path.join(root, d)) # We check the full path, because that's how it can be # specified in the ``_copy_without_render`` setting, but # we store just the dir name if is_copy_only_path(d_, context): copy_dirs.append(d) else: render_dirs.append(d) for copy_dir in copy_dirs: indir = os.path.normpath(os.path.join(root, copy_dir)) outdir = os.path.normpath(os.path.join(project_dir, indir)) logger.debug( 'Copying dir {} to {} without rendering' ''.format(indir, outdir) ) shutil.copytree(indir, outdir) # We mutate ``dirs``, because we only want to go through these dirs # recursively dirs[:] = render_dirs for d in dirs: unrendered_dir = os.path.join(project_dir, root, d) try: render_and_create_dir( unrendered_dir, context, output_dir, env, overwrite_if_exists ) except UndefinedError as err: if delete_project_on_failure: rmtree(project_dir) _dir = os.path.relpath(unrendered_dir, output_dir) msg = "Unable to create directory '{}'".format(_dir) raise UndefinedVariableInTemplate(msg, err, context) for f in files: infile = os.path.normpath(os.path.join(root, f)) if is_copy_only_path(infile, context): outfile_tmpl = env.from_string(infile) outfile_rendered = outfile_tmpl.render(**context) outfile = os.path.join(project_dir, outfile_rendered) logger.debug( 'Copying file {} to {} without rendering' ''.format(infile, outfile) ) shutil.copyfile(infile, outfile) shutil.copymode(infile, outfile) continue try: generate_file(project_dir, infile, context, env) except UndefinedError as err: if delete_project_on_failure: rmtree(project_dir) msg = "Unable to create file '{}'".format(infile) raise UndefinedVariableInTemplate(msg, err, context) _run_hook_from_repo_dir( repo_dir, 'post_gen_project', project_dir, context, delete_project_on_failure ) return project_dir
[ "def", "generate_files", "(", "repo_dir", ",", "context", "=", "None", ",", "output_dir", "=", "'.'", ",", "overwrite_if_exists", "=", "False", ")", ":", "template_dir", "=", "find_template", "(", "repo_dir", ")", "logger", ".", "debug", "(", "'Generating project from {}...'", ".", "format", "(", "template_dir", ")", ")", "context", "=", "context", "or", "OrderedDict", "(", "[", "]", ")", "unrendered_dir", "=", "os", ".", "path", ".", "split", "(", "template_dir", ")", "[", "1", "]", "ensure_dir_is_templated", "(", "unrendered_dir", ")", "env", "=", "StrictEnvironment", "(", "context", "=", "context", ",", "keep_trailing_newline", "=", "True", ",", ")", "try", ":", "project_dir", ",", "output_directory_created", "=", "render_and_create_dir", "(", "unrendered_dir", ",", "context", ",", "output_dir", ",", "env", ",", "overwrite_if_exists", ")", "except", "UndefinedError", "as", "err", ":", "msg", "=", "\"Unable to create project directory '{}'\"", ".", "format", "(", "unrendered_dir", ")", "raise", "UndefinedVariableInTemplate", "(", "msg", ",", "err", ",", "context", ")", "# We want the Jinja path and the OS paths to match. Consequently, we'll:", "# + CD to the template folder", "# + Set Jinja's path to '.'", "#", "# In order to build our files to the correct folder(s), we'll use an", "# absolute path for the target folder (project_dir)", "project_dir", "=", "os", ".", "path", ".", "abspath", "(", "project_dir", ")", "logger", ".", "debug", "(", "'Project directory is {}'", ".", "format", "(", "project_dir", ")", ")", "# if we created the output directory, then it's ok to remove it", "# if rendering fails", "delete_project_on_failure", "=", "output_directory_created", "_run_hook_from_repo_dir", "(", "repo_dir", ",", "'pre_gen_project'", ",", "project_dir", ",", "context", ",", "delete_project_on_failure", ")", "with", "work_in", "(", "template_dir", ")", ":", "env", ".", "loader", "=", "FileSystemLoader", "(", "'.'", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "'.'", ")", ":", "# We must separate the two types of dirs into different lists.", "# The reason is that we don't want ``os.walk`` to go through the", "# unrendered directories, since they will just be copied.", "copy_dirs", "=", "[", "]", "render_dirs", "=", "[", "]", "for", "d", "in", "dirs", ":", "d_", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "d", ")", ")", "# We check the full path, because that's how it can be", "# specified in the ``_copy_without_render`` setting, but", "# we store just the dir name", "if", "is_copy_only_path", "(", "d_", ",", "context", ")", ":", "copy_dirs", ".", "append", "(", "d", ")", "else", ":", "render_dirs", ".", "append", "(", "d", ")", "for", "copy_dir", "in", "copy_dirs", ":", "indir", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "copy_dir", ")", ")", "outdir", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "project_dir", ",", "indir", ")", ")", "logger", ".", "debug", "(", "'Copying dir {} to {} without rendering'", "''", ".", "format", "(", "indir", ",", "outdir", ")", ")", "shutil", ".", "copytree", "(", "indir", ",", "outdir", ")", "# We mutate ``dirs``, because we only want to go through these dirs", "# recursively", "dirs", "[", ":", "]", "=", "render_dirs", "for", "d", "in", "dirs", ":", "unrendered_dir", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "root", ",", "d", ")", "try", ":", 
"render_and_create_dir", "(", "unrendered_dir", ",", "context", ",", "output_dir", ",", "env", ",", "overwrite_if_exists", ")", "except", "UndefinedError", "as", "err", ":", "if", "delete_project_on_failure", ":", "rmtree", "(", "project_dir", ")", "_dir", "=", "os", ".", "path", ".", "relpath", "(", "unrendered_dir", ",", "output_dir", ")", "msg", "=", "\"Unable to create directory '{}'\"", ".", "format", "(", "_dir", ")", "raise", "UndefinedVariableInTemplate", "(", "msg", ",", "err", ",", "context", ")", "for", "f", "in", "files", ":", "infile", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ")", "if", "is_copy_only_path", "(", "infile", ",", "context", ")", ":", "outfile_tmpl", "=", "env", ".", "from_string", "(", "infile", ")", "outfile_rendered", "=", "outfile_tmpl", ".", "render", "(", "*", "*", "context", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "outfile_rendered", ")", "logger", ".", "debug", "(", "'Copying file {} to {} without rendering'", "''", ".", "format", "(", "infile", ",", "outfile", ")", ")", "shutil", ".", "copyfile", "(", "infile", ",", "outfile", ")", "shutil", ".", "copymode", "(", "infile", ",", "outfile", ")", "continue", "try", ":", "generate_file", "(", "project_dir", ",", "infile", ",", "context", ",", "env", ")", "except", "UndefinedError", "as", "err", ":", "if", "delete_project_on_failure", ":", "rmtree", "(", "project_dir", ")", "msg", "=", "\"Unable to create file '{}'\"", ".", "format", "(", "infile", ")", "raise", "UndefinedVariableInTemplate", "(", "msg", ",", "err", ",", "context", ")", "_run_hook_from_repo_dir", "(", "repo_dir", ",", "'post_gen_project'", ",", "project_dir", ",", "context", ",", "delete_project_on_failure", ")", "return", "project_dir" ]
Render the templates and saves them to files. :param repo_dir: Project template input directory. :param context: Dict for populating the template's variables. :param output_dir: Where to output the generated project dir into. :param overwrite_if_exists: Overwrite the contents of the output directory if it exists.
[ "Render", "the", "templates", "and", "saves", "them", "to", "files", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/generate.py#L247-L379
train
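An end-to-end sketch combining generate_context and generate_files; repo_dir is a hypothetical local checkout whose cookiecutter.json defines the variables used here:

import os
from cookiecutter.generate import generate_context, generate_files

repo_dir = 'cookiecutter-pypackage'  # hypothetical template checkout
context = generate_context(
    context_file=os.path.join(repo_dir, 'cookiecutter.json'),
    extra_context={'project_slug': 'demo'},
)
# Runs pre/post hooks, renders every file and directory, and returns
# the absolute path of the generated project.
project_dir = generate_files(repo_dir, context=context, output_dir='.')
print(project_dir)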
audreyr/cookiecutter
cookiecutter/config.py
_expand_path
def _expand_path(path): """Expand both environment variables and user home in the given path.""" path = os.path.expandvars(path) path = os.path.expanduser(path) return path
python
def _expand_path(path): """Expand both environment variables and user home in the given path.""" path = os.path.expandvars(path) path = os.path.expanduser(path) return path
[ "def", "_expand_path", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "expandvars", "(", "path", ")", "path", "=", "os", ".", "path", ".", "expanduser", "(", "path", ")", "return", "path" ]
Expand both environment variables and user home in the given path.
[ "Expand", "both", "environment", "variables", "and", "user", "home", "in", "the", "given", "path", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/config.py#L36-L40
train
audreyr/cookiecutter
cookiecutter/config.py
merge_configs
def merge_configs(default, overwrite): """Recursively update a dict with the key/value pair of another. Dict values that are dictionaries themselves will be updated, whilst preserving existing keys. """ new_config = copy.deepcopy(default) for k, v in overwrite.items(): # Make sure to preserve existing items in # nested dicts, for example `abbreviations` if isinstance(v, dict): new_config[k] = merge_configs(default[k], v) else: new_config[k] = v return new_config
python
def merge_configs(default, overwrite): """Recursively update a dict with the key/value pair of another. Dict values that are dictionaries themselves will be updated, whilst preserving existing keys. """ new_config = copy.deepcopy(default) for k, v in overwrite.items(): # Make sure to preserve existing items in # nested dicts, for example `abbreviations` if isinstance(v, dict): new_config[k] = merge_configs(default[k], v) else: new_config[k] = v return new_config
[ "def", "merge_configs", "(", "default", ",", "overwrite", ")", ":", "new_config", "=", "copy", ".", "deepcopy", "(", "default", ")", "for", "k", ",", "v", "in", "overwrite", ".", "items", "(", ")", ":", "# Make sure to preserve existing items in", "# nested dicts, for example `abbreviations`", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "new_config", "[", "k", "]", "=", "merge_configs", "(", "default", "[", "k", "]", ",", "v", ")", "else", ":", "new_config", "[", "k", "]", "=", "v", "return", "new_config" ]
Recursively update a dict with the key/value pair of another. Dict values that are dictionaries themselves will be updated, whilst preserving existing keys.
[ "Recursively", "update", "a", "dict", "with", "the", "key", "/", "value", "pair", "of", "another", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/config.py#L43-L59
train
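A self-contained sketch of merge_configs; note the nested-dict branch looks the key up in default, so nested overwrite keys are expected to exist there:

from cookiecutter.config import merge_configs

default = {
    'cookiecutters_dir': '~/.cookiecutters/',
    'abbreviations': {'gh': 'https://github.com/{0}.git'},
}
user = {'abbreviations': {'bb': 'https://bitbucket.org/{0}'}}

merged = merge_configs(default, user)
# Nested dicts are merged rather than replaced, so both entries survive:
print(merged['abbreviations'])
# {'gh': 'https://github.com/{0}.git', 'bb': 'https://bitbucket.org/{0}'}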
audreyr/cookiecutter
cookiecutter/config.py
get_config
def get_config(config_path): """Retrieve the config from the specified path, returning a config dict.""" if not os.path.exists(config_path): raise ConfigDoesNotExistException logger.debug('config_path is {0}'.format(config_path)) with io.open(config_path, encoding='utf-8') as file_handle: try: yaml_dict = poyo.parse_string(file_handle.read()) except poyo.exceptions.PoyoException as e: raise InvalidConfiguration( 'Unable to parse YAML file {}. Error: {}' ''.format(config_path, e) ) config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict) raw_replay_dir = config_dict['replay_dir'] config_dict['replay_dir'] = _expand_path(raw_replay_dir) raw_cookies_dir = config_dict['cookiecutters_dir'] config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir) return config_dict
python
def get_config(config_path): """Retrieve the config from the specified path, returning a config dict.""" if not os.path.exists(config_path): raise ConfigDoesNotExistException logger.debug('config_path is {0}'.format(config_path)) with io.open(config_path, encoding='utf-8') as file_handle: try: yaml_dict = poyo.parse_string(file_handle.read()) except poyo.exceptions.PoyoException as e: raise InvalidConfiguration( 'Unable to parse YAML file {}. Error: {}' ''.format(config_path, e) ) config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict) raw_replay_dir = config_dict['replay_dir'] config_dict['replay_dir'] = _expand_path(raw_replay_dir) raw_cookies_dir = config_dict['cookiecutters_dir'] config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir) return config_dict
[ "def", "get_config", "(", "config_path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "config_path", ")", ":", "raise", "ConfigDoesNotExistException", "logger", ".", "debug", "(", "'config_path is {0}'", ".", "format", "(", "config_path", ")", ")", "with", "io", ".", "open", "(", "config_path", ",", "encoding", "=", "'utf-8'", ")", "as", "file_handle", ":", "try", ":", "yaml_dict", "=", "poyo", ".", "parse_string", "(", "file_handle", ".", "read", "(", ")", ")", "except", "poyo", ".", "exceptions", ".", "PoyoException", "as", "e", ":", "raise", "InvalidConfiguration", "(", "'Unable to parse YAML file {}. Error: {}'", "''", ".", "format", "(", "config_path", ",", "e", ")", ")", "config_dict", "=", "merge_configs", "(", "DEFAULT_CONFIG", ",", "yaml_dict", ")", "raw_replay_dir", "=", "config_dict", "[", "'replay_dir'", "]", "config_dict", "[", "'replay_dir'", "]", "=", "_expand_path", "(", "raw_replay_dir", ")", "raw_cookies_dir", "=", "config_dict", "[", "'cookiecutters_dir'", "]", "config_dict", "[", "'cookiecutters_dir'", "]", "=", "_expand_path", "(", "raw_cookies_dir", ")", "return", "config_dict" ]
Retrieve the config from the specified path, returning a config dict.
[ "Retrieve", "the", "config", "from", "the", "specified", "path", "returning", "a", "config", "dict", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/config.py#L62-L85
train
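A usage sketch for get_config; the YAML path and contents are hypothetical, and the file must exist or ConfigDoesNotExistException is raised:

from cookiecutter.config import get_config

# Assumed file at /path/to/config.yaml:
#   default_context:
#       full_name: "Jane Doe"
#   cookiecutters_dir: "$HOME/.cookiecutters/"
config = get_config('/path/to/config.yaml')
# replay_dir and cookiecutters_dir come back with '~' and environment
# variables already expanded.
print(config['cookiecutters_dir'])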
audreyr/cookiecutter
cookiecutter/config.py
get_user_config
def get_user_config(config_file=None, default_config=False): """Return the user config as a dict. If ``default_config`` is True, ignore ``config_file`` and return default values for the config parameters. If a path to a ``config_file`` is given, that is different from the default location, load the user config from that. Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG`` environment variable. If set, load the config from this path. This will raise an error if the specified path is not valid. If the environment variable is not set, try the default config file path before falling back to the default config values. """ # Do NOT load a config. Return defaults instead. if default_config: return copy.copy(DEFAULT_CONFIG) # Load the given config file if config_file and config_file is not USER_CONFIG_PATH: return get_config(config_file) try: # Does the user set up a config environment variable? env_config_file = os.environ['COOKIECUTTER_CONFIG'] except KeyError: # Load an optional user config if it exists # otherwise return the defaults if os.path.exists(USER_CONFIG_PATH): return get_config(USER_CONFIG_PATH) else: return copy.copy(DEFAULT_CONFIG) else: # There is a config environment variable. Try to load it. # Do not check for existence, so invalid file paths raise an error. return get_config(env_config_file)
python
def get_user_config(config_file=None, default_config=False): """Return the user config as a dict. If ``default_config`` is True, ignore ``config_file`` and return default values for the config parameters. If a path to a ``config_file`` is given, that is different from the default location, load the user config from that. Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG`` environment variable. If set, load the config from this path. This will raise an error if the specified path is not valid. If the environment variable is not set, try the default config file path before falling back to the default config values. """ # Do NOT load a config. Return defaults instead. if default_config: return copy.copy(DEFAULT_CONFIG) # Load the given config file if config_file and config_file is not USER_CONFIG_PATH: return get_config(config_file) try: # Does the user set up a config environment variable? env_config_file = os.environ['COOKIECUTTER_CONFIG'] except KeyError: # Load an optional user config if it exists # otherwise return the defaults if os.path.exists(USER_CONFIG_PATH): return get_config(USER_CONFIG_PATH) else: return copy.copy(DEFAULT_CONFIG) else: # There is a config environment variable. Try to load it. # Do not check for existence, so invalid file paths raise an error. return get_config(env_config_file)
[ "def", "get_user_config", "(", "config_file", "=", "None", ",", "default_config", "=", "False", ")", ":", "# Do NOT load a config. Return defaults instead.", "if", "default_config", ":", "return", "copy", ".", "copy", "(", "DEFAULT_CONFIG", ")", "# Load the given config file", "if", "config_file", "and", "config_file", "is", "not", "USER_CONFIG_PATH", ":", "return", "get_config", "(", "config_file", ")", "try", ":", "# Does the user set up a config environment variable?", "env_config_file", "=", "os", ".", "environ", "[", "'COOKIECUTTER_CONFIG'", "]", "except", "KeyError", ":", "# Load an optional user config if it exists", "# otherwise return the defaults", "if", "os", ".", "path", ".", "exists", "(", "USER_CONFIG_PATH", ")", ":", "return", "get_config", "(", "USER_CONFIG_PATH", ")", "else", ":", "return", "copy", ".", "copy", "(", "DEFAULT_CONFIG", ")", "else", ":", "# There is a config environment variable. Try to load it.", "# Do not check for existence, so invalid file paths raise an error.", "return", "get_config", "(", "env_config_file", ")" ]
Return the user config as a dict. If ``default_config`` is True, ignore ``config_file`` and return default values for the config parameters. If a path to a ``config_file`` is given, that is different from the default location, load the user config from that. Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG`` environment variable. If set, load the config from this path. This will raise an error if the specified path is not valid. If the environment variable is not set, try the default config file path before falling back to the default config values.
[ "Return", "the", "user", "config", "as", "a", "dict", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/config.py#L88-L125
train
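A sketch of the get_user_config resolution order; the config path is hypothetical and assumed to exist:

from cookiecutter.config import get_user_config

# Defaults only -- no file is consulted at all:
defaults = get_user_config(default_config=True)

# An explicit file wins over COOKIECUTTER_CONFIG, which in turn wins
# over ~/.cookiecutterrc; an env var pointing at a missing file raises
# instead of falling back.
config = get_user_config(config_file='/path/to/config.yaml')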
audreyr/cookiecutter
cookiecutter/utils.py
force_delete
def force_delete(func, path, exc_info): """Error handler for `shutil.rmtree()` equivalent to `rm -rf`. Usage: `shutil.rmtree(path, onerror=force_delete)` From stackoverflow.com/questions/1889597 """ os.chmod(path, stat.S_IWRITE) func(path)
python
def force_delete(func, path, exc_info): """Error handler for `shutil.rmtree()` equivalent to `rm -rf`. Usage: `shutil.rmtree(path, onerror=force_delete)` From stackoverflow.com/questions/1889597 """ os.chmod(path, stat.S_IWRITE) func(path)
[ "def", "force_delete", "(", "func", ",", "path", ",", "exc_info", ")", ":", "os", ".", "chmod", "(", "path", ",", "stat", ".", "S_IWRITE", ")", "func", "(", "path", ")" ]
Error handler for `shutil.rmtree()` equivalent to `rm -rf`. Usage: `shutil.rmtree(path, onerror=force_delete)` From stackoverflow.com/questions/1889597
[ "Error", "handler", "for", "shutil", ".", "rmtree", "()", "equivalent", "to", "rm", "-", "rf", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/utils.py#L19-L26
train
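The intended idiom, per the docstring, is to pass force_delete as the rmtree error handler; the path here is hypothetical:

import shutil
from cookiecutter.utils import force_delete

# Succeeds even when the tree contains read-only files (common with
# .git objects on Windows): the handler makes the path writable, then
# retries the failed operation.
shutil.rmtree('some/checkout', onerror=force_delete)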
audreyr/cookiecutter
cookiecutter/utils.py
make_sure_path_exists
def make_sure_path_exists(path): """Ensure that a directory exists. :param path: A directory path. """ logger.debug('Making sure path exists: {}'.format(path)) try: os.makedirs(path) logger.debug('Created directory at: {}'.format(path)) except OSError as exception: if exception.errno != errno.EEXIST: return False return True
python
def make_sure_path_exists(path): """Ensure that a directory exists. :param path: A directory path. """ logger.debug('Making sure path exists: {}'.format(path)) try: os.makedirs(path) logger.debug('Created directory at: {}'.format(path)) except OSError as exception: if exception.errno != errno.EEXIST: return False return True
[ "def", "make_sure_path_exists", "(", "path", ")", ":", "logger", ".", "debug", "(", "'Making sure path exists: {}'", ".", "format", "(", "path", ")", ")", "try", ":", "os", ".", "makedirs", "(", "path", ")", "logger", ".", "debug", "(", "'Created directory at: {}'", ".", "format", "(", "path", ")", ")", "except", "OSError", "as", "exception", ":", "if", "exception", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "return", "False", "return", "True" ]
Ensure that a directory exists. :param path: A directory path.
[ "Ensure", "that", "a", "directory", "exists", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/utils.py#L37-L49
train
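A short sketch of make_sure_path_exists; unlike os.makedirs it reports failure through its return value rather than raising:

from cookiecutter.utils import make_sure_path_exists

if not make_sure_path_exists('output/nested/dirs'):
    raise RuntimeError('could not create the output directory')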
audreyr/cookiecutter
cookiecutter/utils.py
work_in
def work_in(dirname=None): """Context manager version of os.chdir. When exited, returns to the working directory prior to entering. """ curdir = os.getcwd() try: if dirname is not None: os.chdir(dirname) yield finally: os.chdir(curdir)
python
def work_in(dirname=None): """Context manager version of os.chdir. When exited, returns to the working directory prior to entering. """ curdir = os.getcwd() try: if dirname is not None: os.chdir(dirname) yield finally: os.chdir(curdir)
[ "def", "work_in", "(", "dirname", "=", "None", ")", ":", "curdir", "=", "os", ".", "getcwd", "(", ")", "try", ":", "if", "dirname", "is", "not", "None", ":", "os", ".", "chdir", "(", "dirname", ")", "yield", "finally", ":", "os", ".", "chdir", "(", "curdir", ")" ]
Context manager version of os.chdir. When exited, returns to the working directory prior to entering.
[ "Context", "manager", "version", "of", "os", ".", "chdir", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/utils.py#L53-L64
train
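A usage sketch for work_in; the directory is hypothetical and must exist:

import os
from cookiecutter.utils import work_in

with work_in('some/template/dir'):
    print(os.getcwd())  # inside the template dir
# The previous working directory is restored here, even on exceptions.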
audreyr/cookiecutter
cookiecutter/utils.py
make_executable
def make_executable(script_path): """Make `script_path` executable. :param script_path: The file to change """ status = os.stat(script_path) os.chmod(script_path, status.st_mode | stat.S_IEXEC)
python
def make_executable(script_path): """Make `script_path` executable. :param script_path: The file to change """ status = os.stat(script_path) os.chmod(script_path, status.st_mode | stat.S_IEXEC)
[ "def", "make_executable", "(", "script_path", ")", ":", "status", "=", "os", ".", "stat", "(", "script_path", ")", "os", ".", "chmod", "(", "script_path", ",", "status", ".", "st_mode", "|", "stat", ".", "S_IEXEC", ")" ]
Make `script_path` executable. :param script_path: The file to change
[ "Make", "script_path", "executable", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/utils.py#L67-L73
train
audreyr/cookiecutter
cookiecutter/utils.py
prompt_and_delete
def prompt_and_delete(path, no_input=False): """ Ask user if it's okay to delete the previously-downloaded file/directory. If yes, delete it. If no, checks to see if the old version should be reused. If yes, it's reused; otherwise, Cookiecutter exits. :param path: Previously downloaded zipfile. :param no_input: Suppress prompt to delete repo and just delete it. :return: True if the content was deleted """ # Suppress prompt if called via API if no_input: ok_to_delete = True else: question = ( "You've downloaded {} before. " "Is it okay to delete and re-download it?" ).format(path) ok_to_delete = read_user_yes_no(question, 'yes') if ok_to_delete: if os.path.isdir(path): rmtree(path) else: os.remove(path) return True else: ok_to_reuse = read_user_yes_no( "Do you want to re-use the existing version?", 'yes' ) if ok_to_reuse: return False sys.exit()
python
def prompt_and_delete(path, no_input=False): """ Ask user if it's okay to delete the previously-downloaded file/directory. If yes, delete it. If no, checks to see if the old version should be reused. If yes, it's reused; otherwise, Cookiecutter exits. :param path: Previously downloaded zipfile. :param no_input: Suppress prompt to delete repo and just delete it. :return: True if the content was deleted """ # Suppress prompt if called via API if no_input: ok_to_delete = True else: question = ( "You've downloaded {} before. " "Is it okay to delete and re-download it?" ).format(path) ok_to_delete = read_user_yes_no(question, 'yes') if ok_to_delete: if os.path.isdir(path): rmtree(path) else: os.remove(path) return True else: ok_to_reuse = read_user_yes_no( "Do you want to re-use the existing version?", 'yes' ) if ok_to_reuse: return False sys.exit()
[ "def", "prompt_and_delete", "(", "path", ",", "no_input", "=", "False", ")", ":", "# Suppress prompt if called via API", "if", "no_input", ":", "ok_to_delete", "=", "True", "else", ":", "question", "=", "(", "\"You've downloaded {} before. \"", "\"Is it okay to delete and re-download it?\"", ")", ".", "format", "(", "path", ")", "ok_to_delete", "=", "read_user_yes_no", "(", "question", ",", "'yes'", ")", "if", "ok_to_delete", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "rmtree", "(", "path", ")", "else", ":", "os", ".", "remove", "(", "path", ")", "return", "True", "else", ":", "ok_to_reuse", "=", "read_user_yes_no", "(", "\"Do you want to re-use the existing version?\"", ",", "'yes'", ")", "if", "ok_to_reuse", ":", "return", "False", "sys", ".", "exit", "(", ")" ]
Ask user if it's okay to delete the previously-downloaded file/directory. If yes, delete it. If no, checks to see if the old version should be reused. If yes, it's reused; otherwise, Cookiecutter exits. :param path: Previously downloaded zipfile. :param no_input: Suppress prompt to delete repo and just delete it. :return: True if the content was deleted
[ "Ask", "user", "if", "it", "s", "okay", "to", "delete", "the", "previously", "-", "downloaded", "file", "/", "directory", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/utils.py#L76-L112
train
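A sketch of prompt_and_delete; the path is hypothetical and assumed to exist:

from cookiecutter.utils import prompt_and_delete

# no_input=True suppresses the prompt and deletes unconditionally,
# returning True. Interactively, declining both the deletion and the
# reuse question exits the process; agreeing to reuse returns False.
deleted = prompt_and_delete('downloads/template.zip', no_input=True)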
audreyr/cookiecutter
cookiecutter/zipfile.py
unzip
def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None): """Download and unpack a zipfile at a given URI. This will download the zipfile to the cookiecutter repository, and unpack into a temporary directory. :param zip_uri: The URI for the zipfile. :param is_url: Is the zip URI a URL or a file? :param clone_to_dir: The cookiecutter repository directory to put the archive into. :param no_input: Suppress any prompts :param password: The password to use when unpacking the repository. """ # Ensure that clone_to_dir exists clone_to_dir = os.path.expanduser(clone_to_dir) make_sure_path_exists(clone_to_dir) if is_url: # Build the name of the cached zipfile, # and prompt to delete if it already exists. identifier = zip_uri.rsplit('/', 1)[1] zip_path = os.path.join(clone_to_dir, identifier) if os.path.exists(zip_path): download = prompt_and_delete(zip_path, no_input=no_input) else: download = True if download: # (Re) download the zipfile r = requests.get(zip_uri, stream=True) with open(zip_path, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) else: # Just use the local zipfile as-is. zip_path = os.path.abspath(zip_uri) # Now unpack the repository. The zipfile will be unpacked # into a temporary directory try: zip_file = ZipFile(zip_path) if len(zip_file.namelist()) == 0: raise InvalidZipRepository( 'Zip repository {} is empty'.format(zip_uri) ) # The first record in the zipfile should be the directory entry for # the archive. If it isn't a directory, there's a problem. first_filename = zip_file.namelist()[0] if not first_filename.endswith('/'): raise InvalidZipRepository( 'Zip repository {} does not include ' 'a top-level directory'.format(zip_uri) ) # Construct the final target directory project_name = first_filename[:-1] unzip_base = tempfile.mkdtemp() unzip_path = os.path.join(unzip_base, project_name) # Extract the zip file into the temporary directory try: zip_file.extractall(path=unzip_base) except RuntimeError: # File is password protected; try to get a password from the # environment; if that doesn't work, ask the user. if password is not None: try: zip_file.extractall( path=unzip_base, pwd=password.encode('utf-8') ) except RuntimeError: raise InvalidZipRepository( 'Invalid password provided for protected repository' ) elif no_input: raise InvalidZipRepository( 'Unable to unlock password protected repository' ) else: retry = 0 while retry is not None: try: password = read_repo_password('Repo password') zip_file.extractall( path=unzip_base, pwd=password.encode('utf-8') ) retry = None except RuntimeError: retry += 1 if retry == 3: raise InvalidZipRepository( 'Invalid password provided ' 'for protected repository' ) except BadZipFile: raise InvalidZipRepository( 'Zip repository {} is not a valid zip archive:'.format(zip_uri) ) return unzip_path
python
def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None): """Download and unpack a zipfile at a given URI. This will download the zipfile to the cookiecutter repository, and unpack into a temporary directory. :param zip_uri: The URI for the zipfile. :param is_url: Is the zip URI a URL or a file? :param clone_to_dir: The cookiecutter repository directory to put the archive into. :param no_input: Suppress any prompts :param password: The password to use when unpacking the repository. """ # Ensure that clone_to_dir exists clone_to_dir = os.path.expanduser(clone_to_dir) make_sure_path_exists(clone_to_dir) if is_url: # Build the name of the cached zipfile, # and prompt to delete if it already exists. identifier = zip_uri.rsplit('/', 1)[1] zip_path = os.path.join(clone_to_dir, identifier) if os.path.exists(zip_path): download = prompt_and_delete(zip_path, no_input=no_input) else: download = True if download: # (Re) download the zipfile r = requests.get(zip_uri, stream=True) with open(zip_path, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) else: # Just use the local zipfile as-is. zip_path = os.path.abspath(zip_uri) # Now unpack the repository. The zipfile will be unpacked # into a temporary directory try: zip_file = ZipFile(zip_path) if len(zip_file.namelist()) == 0: raise InvalidZipRepository( 'Zip repository {} is empty'.format(zip_uri) ) # The first record in the zipfile should be the directory entry for # the archive. If it isn't a directory, there's a problem. first_filename = zip_file.namelist()[0] if not first_filename.endswith('/'): raise InvalidZipRepository( 'Zip repository {} does not include ' 'a top-level directory'.format(zip_uri) ) # Construct the final target directory project_name = first_filename[:-1] unzip_base = tempfile.mkdtemp() unzip_path = os.path.join(unzip_base, project_name) # Extract the zip file into the temporary directory try: zip_file.extractall(path=unzip_base) except RuntimeError: # File is password protected; try to get a password from the # environment; if that doesn't work, ask the user. if password is not None: try: zip_file.extractall( path=unzip_base, pwd=password.encode('utf-8') ) except RuntimeError: raise InvalidZipRepository( 'Invalid password provided for protected repository' ) elif no_input: raise InvalidZipRepository( 'Unable to unlock password protected repository' ) else: retry = 0 while retry is not None: try: password = read_repo_password('Repo password') zip_file.extractall( path=unzip_base, pwd=password.encode('utf-8') ) retry = None except RuntimeError: retry += 1 if retry == 3: raise InvalidZipRepository( 'Invalid password provided ' 'for protected repository' ) except BadZipFile: raise InvalidZipRepository( 'Zip repository {} is not a valid zip archive:'.format(zip_uri) ) return unzip_path
[ "def", "unzip", "(", "zip_uri", ",", "is_url", ",", "clone_to_dir", "=", "'.'", ",", "no_input", "=", "False", ",", "password", "=", "None", ")", ":", "# Ensure that clone_to_dir exists", "clone_to_dir", "=", "os", ".", "path", ".", "expanduser", "(", "clone_to_dir", ")", "make_sure_path_exists", "(", "clone_to_dir", ")", "if", "is_url", ":", "# Build the name of the cached zipfile,", "# and prompt to delete if it already exists.", "identifier", "=", "zip_uri", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "1", "]", "zip_path", "=", "os", ".", "path", ".", "join", "(", "clone_to_dir", ",", "identifier", ")", "if", "os", ".", "path", ".", "exists", "(", "zip_path", ")", ":", "download", "=", "prompt_and_delete", "(", "zip_path", ",", "no_input", "=", "no_input", ")", "else", ":", "download", "=", "True", "if", "download", ":", "# (Re) download the zipfile", "r", "=", "requests", ".", "get", "(", "zip_uri", ",", "stream", "=", "True", ")", "with", "open", "(", "zip_path", ",", "'wb'", ")", "as", "f", ":", "for", "chunk", "in", "r", ".", "iter_content", "(", "chunk_size", "=", "1024", ")", ":", "if", "chunk", ":", "# filter out keep-alive new chunks", "f", ".", "write", "(", "chunk", ")", "else", ":", "# Just use the local zipfile as-is.", "zip_path", "=", "os", ".", "path", ".", "abspath", "(", "zip_uri", ")", "# Now unpack the repository. The zipfile will be unpacked", "# into a temporary directory", "try", ":", "zip_file", "=", "ZipFile", "(", "zip_path", ")", "if", "len", "(", "zip_file", ".", "namelist", "(", ")", ")", "==", "0", ":", "raise", "InvalidZipRepository", "(", "'Zip repository {} is empty'", ".", "format", "(", "zip_uri", ")", ")", "# The first record in the zipfile should be the directory entry for", "# the archive. If it isn't a directory, there's a problem.", "first_filename", "=", "zip_file", ".", "namelist", "(", ")", "[", "0", "]", "if", "not", "first_filename", ".", "endswith", "(", "'/'", ")", ":", "raise", "InvalidZipRepository", "(", "'Zip repository {} does not include '", "'a top-level directory'", ".", "format", "(", "zip_uri", ")", ")", "# Construct the final target directory", "project_name", "=", "first_filename", "[", ":", "-", "1", "]", "unzip_base", "=", "tempfile", ".", "mkdtemp", "(", ")", "unzip_path", "=", "os", ".", "path", ".", "join", "(", "unzip_base", ",", "project_name", ")", "# Extract the zip file into the temporary directory", "try", ":", "zip_file", ".", "extractall", "(", "path", "=", "unzip_base", ")", "except", "RuntimeError", ":", "# File is password protected; try to get a password from the", "# environment; if that doesn't work, ask the user.", "if", "password", "is", "not", "None", ":", "try", ":", "zip_file", ".", "extractall", "(", "path", "=", "unzip_base", ",", "pwd", "=", "password", ".", "encode", "(", "'utf-8'", ")", ")", "except", "RuntimeError", ":", "raise", "InvalidZipRepository", "(", "'Invalid password provided for protected repository'", ")", "elif", "no_input", ":", "raise", "InvalidZipRepository", "(", "'Unable to unlock password protected repository'", ")", "else", ":", "retry", "=", "0", "while", "retry", "is", "not", "None", ":", "try", ":", "password", "=", "read_repo_password", "(", "'Repo password'", ")", "zip_file", ".", "extractall", "(", "path", "=", "unzip_base", ",", "pwd", "=", "password", ".", "encode", "(", "'utf-8'", ")", ")", "retry", "=", "None", "except", "RuntimeError", ":", "retry", "+=", "1", "if", "retry", "==", "3", ":", "raise", "InvalidZipRepository", "(", "'Invalid password provided '", "'for 
protected repository'", ")", "except", "BadZipFile", ":", "raise", "InvalidZipRepository", "(", "'Zip repository {} is not a valid zip archive:'", ".", "format", "(", "zip_uri", ")", ")", "return", "unzip_path" ]
Download and unpack a zipfile at a given URI. This will download the zipfile to the cookiecutter repository, and unpack into a temporary directory. :param zip_uri: The URI for the zipfile. :param is_url: Is the zip URI a URL or a file? :param clone_to_dir: The cookiecutter repository directory to put the archive into. :param no_input: Suppress any prompts :param password: The password to use when unpacking the repository.
[ "Download", "and", "unpack", "a", "zipfile", "at", "a", "given", "URI", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/zipfile.py#L18-L124
train
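A minimal usage sketch for the unzip record above, assuming the module path cookiecutter.zipfile from the record's URL and a hypothetical local archive /tmp/template.zip containing a single top-level directory:

from cookiecutter.zipfile import unzip

# Local archive: is_url=False skips the download/cache branch entirely.
repo_dir = unzip('/tmp/template.zip', is_url=False, no_input=True)
# repo_dir points inside a fresh tempfile.mkdtemp() directory,
# e.g. '/tmp/tmpabc123/template'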
audreyr/cookiecutter
cookiecutter/main.py
cookiecutter
def cookiecutter( template, checkout=None, no_input=False, extra_context=None, replay=False, overwrite_if_exists=False, output_dir='.', config_file=None, default_config=False, password=None): """ Run Cookiecutter just as if using it from the command line. :param template: A directory containing a project template directory, or a URL to a git repository. :param checkout: The branch, tag or commit ID to checkout after clone. :param no_input: Prompt the user at command line for manual configuration? :param extra_context: A dictionary of context that overrides default and user configuration. :param: overwrite_if_exists: Overwrite the contents of output directory if it exists :param output_dir: Where to output the generated project dir into. :param config_file: User configuration file path. :param default_config: Use default values rather than a config file. :param password: The password to use when extracting the repository. """ if replay and ((no_input is not False) or (extra_context is not None)): err_msg = ( "You can not use both replay and no_input or extra_context " "at the same time." ) raise InvalidModeException(err_msg) config_dict = get_user_config( config_file=config_file, default_config=default_config, ) repo_dir, cleanup = determine_repo_dir( template=template, abbreviations=config_dict['abbreviations'], clone_to_dir=config_dict['cookiecutters_dir'], checkout=checkout, no_input=no_input, password=password ) template_name = os.path.basename(os.path.abspath(repo_dir)) if replay: context = load(config_dict['replay_dir'], template_name) else: context_file = os.path.join(repo_dir, 'cookiecutter.json') logger.debug('context_file is {}'.format(context_file)) context = generate_context( context_file=context_file, default_context=config_dict['default_context'], extra_context=extra_context, ) # prompt the user to manually configure at the command line. # except when 'no-input' flag is set context['cookiecutter'] = prompt_for_config(context, no_input) # include template dir or url in the context dict context['cookiecutter']['_template'] = template dump(config_dict['replay_dir'], template_name, context) # Create project from local context and project template. result = generate_files( repo_dir=repo_dir, context=context, overwrite_if_exists=overwrite_if_exists, output_dir=output_dir ) # Cleanup (if required) if cleanup: rmtree(repo_dir) return result
python
def cookiecutter( template, checkout=None, no_input=False, extra_context=None, replay=False, overwrite_if_exists=False, output_dir='.', config_file=None, default_config=False, password=None): """ Run Cookiecutter just as if using it from the command line. :param template: A directory containing a project template directory, or a URL to a git repository. :param checkout: The branch, tag or commit ID to checkout after clone. :param no_input: Prompt the user at command line for manual configuration? :param extra_context: A dictionary of context that overrides default and user configuration. :param: overwrite_if_exists: Overwrite the contents of output directory if it exists :param output_dir: Where to output the generated project dir into. :param config_file: User configuration file path. :param default_config: Use default values rather than a config file. :param password: The password to use when extracting the repository. """ if replay and ((no_input is not False) or (extra_context is not None)): err_msg = ( "You can not use both replay and no_input or extra_context " "at the same time." ) raise InvalidModeException(err_msg) config_dict = get_user_config( config_file=config_file, default_config=default_config, ) repo_dir, cleanup = determine_repo_dir( template=template, abbreviations=config_dict['abbreviations'], clone_to_dir=config_dict['cookiecutters_dir'], checkout=checkout, no_input=no_input, password=password ) template_name = os.path.basename(os.path.abspath(repo_dir)) if replay: context = load(config_dict['replay_dir'], template_name) else: context_file = os.path.join(repo_dir, 'cookiecutter.json') logger.debug('context_file is {}'.format(context_file)) context = generate_context( context_file=context_file, default_context=config_dict['default_context'], extra_context=extra_context, ) # prompt the user to manually configure at the command line. # except when 'no-input' flag is set context['cookiecutter'] = prompt_for_config(context, no_input) # include template dir or url in the context dict context['cookiecutter']['_template'] = template dump(config_dict['replay_dir'], template_name, context) # Create project from local context and project template. result = generate_files( repo_dir=repo_dir, context=context, overwrite_if_exists=overwrite_if_exists, output_dir=output_dir ) # Cleanup (if required) if cleanup: rmtree(repo_dir) return result
[ "def", "cookiecutter", "(", "template", ",", "checkout", "=", "None", ",", "no_input", "=", "False", ",", "extra_context", "=", "None", ",", "replay", "=", "False", ",", "overwrite_if_exists", "=", "False", ",", "output_dir", "=", "'.'", ",", "config_file", "=", "None", ",", "default_config", "=", "False", ",", "password", "=", "None", ")", ":", "if", "replay", "and", "(", "(", "no_input", "is", "not", "False", ")", "or", "(", "extra_context", "is", "not", "None", ")", ")", ":", "err_msg", "=", "(", "\"You can not use both replay and no_input or extra_context \"", "\"at the same time.\"", ")", "raise", "InvalidModeException", "(", "err_msg", ")", "config_dict", "=", "get_user_config", "(", "config_file", "=", "config_file", ",", "default_config", "=", "default_config", ",", ")", "repo_dir", ",", "cleanup", "=", "determine_repo_dir", "(", "template", "=", "template", ",", "abbreviations", "=", "config_dict", "[", "'abbreviations'", "]", ",", "clone_to_dir", "=", "config_dict", "[", "'cookiecutters_dir'", "]", ",", "checkout", "=", "checkout", ",", "no_input", "=", "no_input", ",", "password", "=", "password", ")", "template_name", "=", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "abspath", "(", "repo_dir", ")", ")", "if", "replay", ":", "context", "=", "load", "(", "config_dict", "[", "'replay_dir'", "]", ",", "template_name", ")", "else", ":", "context_file", "=", "os", ".", "path", ".", "join", "(", "repo_dir", ",", "'cookiecutter.json'", ")", "logger", ".", "debug", "(", "'context_file is {}'", ".", "format", "(", "context_file", ")", ")", "context", "=", "generate_context", "(", "context_file", "=", "context_file", ",", "default_context", "=", "config_dict", "[", "'default_context'", "]", ",", "extra_context", "=", "extra_context", ",", ")", "# prompt the user to manually configure at the command line.", "# except when 'no-input' flag is set", "context", "[", "'cookiecutter'", "]", "=", "prompt_for_config", "(", "context", ",", "no_input", ")", "# include template dir or url in the context dict", "context", "[", "'cookiecutter'", "]", "[", "'_template'", "]", "=", "template", "dump", "(", "config_dict", "[", "'replay_dir'", "]", ",", "template_name", ",", "context", ")", "# Create project from local context and project template.", "result", "=", "generate_files", "(", "repo_dir", "=", "repo_dir", ",", "context", "=", "context", ",", "overwrite_if_exists", "=", "overwrite_if_exists", ",", "output_dir", "=", "output_dir", ")", "# Cleanup (if required)", "if", "cleanup", ":", "rmtree", "(", "repo_dir", ")", "return", "result" ]
Run Cookiecutter just as if using it from the command line. :param template: A directory containing a project template directory, or a URL to a git repository. :param checkout: The branch, tag or commit ID to checkout after clone. :param no_input: Prompt the user at command line for manual configuration? :param extra_context: A dictionary of context that overrides default and user configuration. :param overwrite_if_exists: Overwrite the contents of the output directory if it exists :param output_dir: Where to output the generated project dir into. :param config_file: User configuration file path. :param default_config: Use default values rather than a config file. :param password: The password to use when extracting the repository.
[ "Run", "Cookiecutter", "just", "as", "if", "using", "it", "from", "the", "command", "line", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/main.py#L25-L101
train
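A hedged usage sketch of the API entry point above; the template URL and context keys are illustrative, not taken from the record:

from cookiecutter.main import cookiecutter

# Non-interactive generation. Note from the code above that replay=True
# cannot be combined with no_input or extra_context (InvalidModeException).
project_dir = cookiecutter(
    'https://github.com/audreyr/cookiecutter-pypackage.git',
    no_input=True,
    extra_context={'project_name': 'Demo Project'},
    output_dir='/tmp',
)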
audreyr/cookiecutter
cookiecutter/prompt.py
read_user_yes_no
def read_user_yes_no(question, default_value): """Prompt the user to reply with 'yes' or 'no' (or equivalent values). Note: Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n' :param str question: Question to the user :param default_value: Value that will be returned if no input happens """ # Please see http://click.pocoo.org/4/api/#click.prompt return click.prompt( question, default=default_value, type=click.BOOL )
python
def read_user_yes_no(question, default_value): """Prompt the user to reply with 'yes' or 'no' (or equivalent values). Note: Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n' :param str question: Question to the user :param default_value: Value that will be returned if no input happens """ # Please see http://click.pocoo.org/4/api/#click.prompt return click.prompt( question, default=default_value, type=click.BOOL )
[ "def", "read_user_yes_no", "(", "question", ",", "default_value", ")", ":", "# Please see http://click.pocoo.org/4/api/#click.prompt", "return", "click", ".", "prompt", "(", "question", ",", "default", "=", "default_value", ",", "type", "=", "click", ".", "BOOL", ")" ]
Prompt the user to reply with 'yes' or 'no' (or equivalent values). Note: Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n' :param str question: Question to the user :param default_value: Value that will be returned if no input happens
[ "Prompt", "the", "user", "to", "reply", "with", "yes", "or", "no", "(", "or", "equivalent", "values", ")", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/prompt.py#L35-L49
train
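A short sketch of the yes/no prompt; the question text is made up:

from cookiecutter.prompt import read_user_yes_no

# click.BOOL converts 'y'/'yes'/'1'/'true' to True and their counterparts
# to False; an empty reply falls back to the default ('yes' here).
if read_user_yes_no('Re-download the template?', 'yes'):
    pass  # proceed with the download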
audreyr/cookiecutter
cookiecutter/prompt.py
read_user_choice
def read_user_choice(var_name, options): """Prompt the user to choose from several options for the given variable. The first item will be returned if no input happens. :param str var_name: Variable as specified in the context :param list options: Sequence of options that are available to select from :return: Exactly one item of ``options`` that has been chosen by the user """ # Please see http://click.pocoo.org/4/api/#click.prompt if not isinstance(options, list): raise TypeError if not options: raise ValueError choice_map = OrderedDict( (u'{}'.format(i), value) for i, value in enumerate(options, 1) ) choices = choice_map.keys() default = u'1' choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()] prompt = u'\n'.join(( u'Select {}:'.format(var_name), u'\n'.join(choice_lines), u'Choose from {}'.format(u', '.join(choices)) )) user_choice = click.prompt( prompt, type=click.Choice(choices), default=default ) return choice_map[user_choice]
python
def read_user_choice(var_name, options): """Prompt the user to choose from several options for the given variable. The first item will be returned if no input happens. :param str var_name: Variable as specified in the context :param list options: Sequence of options that are available to select from :return: Exactly one item of ``options`` that has been chosen by the user """ # Please see http://click.pocoo.org/4/api/#click.prompt if not isinstance(options, list): raise TypeError if not options: raise ValueError choice_map = OrderedDict( (u'{}'.format(i), value) for i, value in enumerate(options, 1) ) choices = choice_map.keys() default = u'1' choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()] prompt = u'\n'.join(( u'Select {}:'.format(var_name), u'\n'.join(choice_lines), u'Choose from {}'.format(u', '.join(choices)) )) user_choice = click.prompt( prompt, type=click.Choice(choices), default=default ) return choice_map[user_choice]
[ "def", "read_user_choice", "(", "var_name", ",", "options", ")", ":", "# Please see http://click.pocoo.org/4/api/#click.prompt", "if", "not", "isinstance", "(", "options", ",", "list", ")", ":", "raise", "TypeError", "if", "not", "options", ":", "raise", "ValueError", "choice_map", "=", "OrderedDict", "(", "(", "u'{}'", ".", "format", "(", "i", ")", ",", "value", ")", "for", "i", ",", "value", "in", "enumerate", "(", "options", ",", "1", ")", ")", "choices", "=", "choice_map", ".", "keys", "(", ")", "default", "=", "u'1'", "choice_lines", "=", "[", "u'{} - {}'", ".", "format", "(", "*", "c", ")", "for", "c", "in", "choice_map", ".", "items", "(", ")", "]", "prompt", "=", "u'\\n'", ".", "join", "(", "(", "u'Select {}:'", ".", "format", "(", "var_name", ")", ",", "u'\\n'", ".", "join", "(", "choice_lines", ")", ",", "u'Choose from {}'", ".", "format", "(", "u', '", ".", "join", "(", "choices", ")", ")", ")", ")", "user_choice", "=", "click", ".", "prompt", "(", "prompt", ",", "type", "=", "click", ".", "Choice", "(", "choices", ")", ",", "default", "=", "default", ")", "return", "choice_map", "[", "user_choice", "]" ]
Prompt the user to choose from several options for the given variable. The first item will be returned if no input happens. :param str var_name: Variable as specified in the context :param list options: Sequence of options that are available to select from :return: Exactly one item of ``options`` that has been chosen by the user
[ "Prompt", "the", "user", "to", "choose", "from", "several", "options", "for", "the", "given", "variable", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/prompt.py#L61-L93
train
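A sketch of the choice prompt; the variable name and options are invented:

from cookiecutter.prompt import read_user_choice

# Shows a numbered menu ('1 - MIT', '2 - BSD-3', '3 - GPLv3'); an empty
# reply selects the default key '1', i.e. the first option.
license_name = read_user_choice('open_source_license', ['MIT', 'BSD-3', 'GPLv3'])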
audreyr/cookiecutter
cookiecutter/prompt.py
read_user_dict
def read_user_dict(var_name, default_value): """Prompt the user to provide a dictionary of data. :param str var_name: Variable as specified in the context :param default_value: Value that will be returned if no input is provided :return: A Python dictionary to use in the context. """ # Please see http://click.pocoo.org/4/api/#click.prompt if not isinstance(default_value, dict): raise TypeError default_display = 'default' user_value = click.prompt( var_name, default=default_display, type=click.STRING, value_proc=process_json, ) if user_value == default_display: # Return the given default w/o any processing return default_value return user_value
python
def read_user_dict(var_name, default_value): """Prompt the user to provide a dictionary of data. :param str var_name: Variable as specified in the context :param default_value: Value that will be returned if no input is provided :return: A Python dictionary to use in the context. """ # Please see http://click.pocoo.org/4/api/#click.prompt if not isinstance(default_value, dict): raise TypeError default_display = 'default' user_value = click.prompt( var_name, default=default_display, type=click.STRING, value_proc=process_json, ) if user_value == default_display: # Return the given default w/o any processing return default_value return user_value
[ "def", "read_user_dict", "(", "var_name", ",", "default_value", ")", ":", "# Please see http://click.pocoo.org/4/api/#click.prompt", "if", "not", "isinstance", "(", "default_value", ",", "dict", ")", ":", "raise", "TypeError", "default_display", "=", "'default'", "user_value", "=", "click", ".", "prompt", "(", "var_name", ",", "default", "=", "default_display", ",", "type", "=", "click", ".", "STRING", ",", "value_proc", "=", "process_json", ",", ")", "if", "user_value", "==", "default_display", ":", "# Return the given default w/o any processing", "return", "default_value", "return", "user_value" ]
Prompt the user to provide a dictionary of data. :param str var_name: Variable as specified in the context :param default_value: Value that will be returned if no input is provided :return: A Python dictionary to use in the context.
[ "Prompt", "the", "user", "to", "provide", "a", "dictionary", "of", "data", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/prompt.py#L113-L136
train
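A sketch of the dict prompt with an assumed default value:

from cookiecutter.prompt import read_user_dict

# Typing JSON such as {"port": 8080} is parsed by process_json and
# returned; keeping the literal answer 'default' (the prompt's default)
# returns default_value unchanged.
options = read_user_dict('deploy_options', {'port': 80, 'workers': 2})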
audreyr/cookiecutter
cookiecutter/prompt.py
render_variable
def render_variable(env, raw, cookiecutter_dict): """Inside the prompting taken from the cookiecutter.json file, this renders the next variable. For example, if a project_name is "Peanut Butter Cookie", the repo_name could be be rendered with: `{{ cookiecutter.project_name.replace(" ", "_") }}`. This is then presented to the user as the default. :param Environment env: A Jinja2 Environment object. :param str raw: The next value to be prompted for by the user. :param dict cookiecutter_dict: The current context as it's gradually being populated with variables. :return: The rendered value for the default variable. """ if raw is None: return None elif isinstance(raw, dict): return { render_variable(env, k, cookiecutter_dict): render_variable(env, v, cookiecutter_dict) for k, v in raw.items() } elif isinstance(raw, list): return [ render_variable(env, v, cookiecutter_dict) for v in raw ] elif not isinstance(raw, basestring): raw = str(raw) template = env.from_string(raw) rendered_template = template.render(cookiecutter=cookiecutter_dict) return rendered_template
python
def render_variable(env, raw, cookiecutter_dict): """Inside the prompting taken from the cookiecutter.json file, this renders the next variable. For example, if a project_name is "Peanut Butter Cookie", the repo_name could be be rendered with: `{{ cookiecutter.project_name.replace(" ", "_") }}`. This is then presented to the user as the default. :param Environment env: A Jinja2 Environment object. :param str raw: The next value to be prompted for by the user. :param dict cookiecutter_dict: The current context as it's gradually being populated with variables. :return: The rendered value for the default variable. """ if raw is None: return None elif isinstance(raw, dict): return { render_variable(env, k, cookiecutter_dict): render_variable(env, v, cookiecutter_dict) for k, v in raw.items() } elif isinstance(raw, list): return [ render_variable(env, v, cookiecutter_dict) for v in raw ] elif not isinstance(raw, basestring): raw = str(raw) template = env.from_string(raw) rendered_template = template.render(cookiecutter=cookiecutter_dict) return rendered_template
[ "def", "render_variable", "(", "env", ",", "raw", ",", "cookiecutter_dict", ")", ":", "if", "raw", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "raw", ",", "dict", ")", ":", "return", "{", "render_variable", "(", "env", ",", "k", ",", "cookiecutter_dict", ")", ":", "render_variable", "(", "env", ",", "v", ",", "cookiecutter_dict", ")", "for", "k", ",", "v", "in", "raw", ".", "items", "(", ")", "}", "elif", "isinstance", "(", "raw", ",", "list", ")", ":", "return", "[", "render_variable", "(", "env", ",", "v", ",", "cookiecutter_dict", ")", "for", "v", "in", "raw", "]", "elif", "not", "isinstance", "(", "raw", ",", "basestring", ")", ":", "raw", "=", "str", "(", "raw", ")", "template", "=", "env", ".", "from_string", "(", "raw", ")", "rendered_template", "=", "template", ".", "render", "(", "cookiecutter", "=", "cookiecutter_dict", ")", "return", "rendered_template" ]
Inside the prompting taken from the cookiecutter.json file, this renders the next variable. For example, if a project_name is "Peanut Butter Cookie", the repo_name could be rendered with: `{{ cookiecutter.project_name.replace(" ", "_") }}`. This is then presented to the user as the default. :param Environment env: A Jinja2 Environment object. :param str raw: The next value to be prompted for by the user. :param dict cookiecutter_dict: The current context as it's gradually being populated with variables. :return: The rendered value for the default variable.
[ "Inside", "the", "prompting", "taken", "from", "the", "cookiecutter", ".", "json", "file", "this", "renders", "the", "next", "variable", ".", "For", "example", "if", "a", "project_name", "is", "Peanut", "Butter", "Cookie", "the", "repo_name", "could", "be", "be", "rendered", "with", ":" ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/prompt.py#L139-L173
train
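The repo_name example from the docstring above, spelled out as a runnable sketch (StrictEnvironment is the Jinja2 environment the prompt module already uses):

from cookiecutter.environment import StrictEnvironment
from cookiecutter.prompt import render_variable

context = {'cookiecutter': {'project_name': 'Peanut Butter Cookie'}}
env = StrictEnvironment(context=context)
default = render_variable(
    env,
    '{{ cookiecutter.project_name.replace(" ", "_") }}',
    context['cookiecutter'],
)
# default == 'Peanut_Butter_Cookie'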
audreyr/cookiecutter
cookiecutter/prompt.py
prompt_choice_for_config
def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input): """Prompt the user which option to choose from the given. Each of the possible choices is rendered beforehand. """ rendered_options = [ render_variable(env, raw, cookiecutter_dict) for raw in options ] if no_input: return rendered_options[0] return read_user_choice(key, rendered_options)
python
def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input): """Prompt the user which option to choose from the given. Each of the possible choices is rendered beforehand. """ rendered_options = [ render_variable(env, raw, cookiecutter_dict) for raw in options ] if no_input: return rendered_options[0] return read_user_choice(key, rendered_options)
[ "def", "prompt_choice_for_config", "(", "cookiecutter_dict", ",", "env", ",", "key", ",", "options", ",", "no_input", ")", ":", "rendered_options", "=", "[", "render_variable", "(", "env", ",", "raw", ",", "cookiecutter_dict", ")", "for", "raw", "in", "options", "]", "if", "no_input", ":", "return", "rendered_options", "[", "0", "]", "return", "read_user_choice", "(", "key", ",", "rendered_options", ")" ]
Prompt the user which option to choose from the given. Each of the possible choices is rendered beforehand.
[ "Prompt", "the", "user", "which", "option", "to", "choose", "from", "the", "given", ".", "Each", "of", "the", "possible", "choices", "is", "rendered", "beforehand", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/prompt.py#L176-L186
train
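A sketch of the no_input fast path, reusing the env and context names from the render_variable sketch above:

from cookiecutter.prompt import prompt_choice_for_config

# With no_input=True nobody is prompted: the first rendered option wins.
# Options may themselves be Jinja templates referring to earlier answers.
val = prompt_choice_for_config(
    context['cookiecutter'], env, 'license', ['MIT', 'BSD-3'], no_input=True,
)
# val == 'MIT'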
audreyr/cookiecutter
cookiecutter/prompt.py
prompt_for_config
def prompt_for_config(context, no_input=False): """ Prompts the user to enter new config, using context as a source for the field names and sample values. :param no_input: Prompt the user at command line for manual configuration? """ cookiecutter_dict = OrderedDict([]) env = StrictEnvironment(context=context) # First pass: Handle simple and raw variables, plus choices. # These must be done first because the dictionaries keys and # values might refer to them. for key, raw in iteritems(context[u'cookiecutter']): if key.startswith(u'_'): cookiecutter_dict[key] = raw continue try: if isinstance(raw, list): # We are dealing with a choice variable val = prompt_choice_for_config( cookiecutter_dict, env, key, raw, no_input ) cookiecutter_dict[key] = val elif not isinstance(raw, dict): # We are dealing with a regular variable val = render_variable(env, raw, cookiecutter_dict) if not no_input: val = read_user_variable(key, val) cookiecutter_dict[key] = val except UndefinedError as err: msg = "Unable to render variable '{}'".format(key) raise UndefinedVariableInTemplate(msg, err, context) # Second pass; handle the dictionaries. for key, raw in iteritems(context[u'cookiecutter']): try: if isinstance(raw, dict): # We are dealing with a dict variable val = render_variable(env, raw, cookiecutter_dict) if not no_input: val = read_user_dict(key, val) cookiecutter_dict[key] = val except UndefinedError as err: msg = "Unable to render variable '{}'".format(key) raise UndefinedVariableInTemplate(msg, err, context) return cookiecutter_dict
python
def prompt_for_config(context, no_input=False): """ Prompts the user to enter new config, using context as a source for the field names and sample values. :param no_input: Prompt the user at command line for manual configuration? """ cookiecutter_dict = OrderedDict([]) env = StrictEnvironment(context=context) # First pass: Handle simple and raw variables, plus choices. # These must be done first because the dictionaries keys and # values might refer to them. for key, raw in iteritems(context[u'cookiecutter']): if key.startswith(u'_'): cookiecutter_dict[key] = raw continue try: if isinstance(raw, list): # We are dealing with a choice variable val = prompt_choice_for_config( cookiecutter_dict, env, key, raw, no_input ) cookiecutter_dict[key] = val elif not isinstance(raw, dict): # We are dealing with a regular variable val = render_variable(env, raw, cookiecutter_dict) if not no_input: val = read_user_variable(key, val) cookiecutter_dict[key] = val except UndefinedError as err: msg = "Unable to render variable '{}'".format(key) raise UndefinedVariableInTemplate(msg, err, context) # Second pass; handle the dictionaries. for key, raw in iteritems(context[u'cookiecutter']): try: if isinstance(raw, dict): # We are dealing with a dict variable val = render_variable(env, raw, cookiecutter_dict) if not no_input: val = read_user_dict(key, val) cookiecutter_dict[key] = val except UndefinedError as err: msg = "Unable to render variable '{}'".format(key) raise UndefinedVariableInTemplate(msg, err, context) return cookiecutter_dict
[ "def", "prompt_for_config", "(", "context", ",", "no_input", "=", "False", ")", ":", "cookiecutter_dict", "=", "OrderedDict", "(", "[", "]", ")", "env", "=", "StrictEnvironment", "(", "context", "=", "context", ")", "# First pass: Handle simple and raw variables, plus choices.", "# These must be done first because the dictionaries keys and", "# values might refer to them.", "for", "key", ",", "raw", "in", "iteritems", "(", "context", "[", "u'cookiecutter'", "]", ")", ":", "if", "key", ".", "startswith", "(", "u'_'", ")", ":", "cookiecutter_dict", "[", "key", "]", "=", "raw", "continue", "try", ":", "if", "isinstance", "(", "raw", ",", "list", ")", ":", "# We are dealing with a choice variable", "val", "=", "prompt_choice_for_config", "(", "cookiecutter_dict", ",", "env", ",", "key", ",", "raw", ",", "no_input", ")", "cookiecutter_dict", "[", "key", "]", "=", "val", "elif", "not", "isinstance", "(", "raw", ",", "dict", ")", ":", "# We are dealing with a regular variable", "val", "=", "render_variable", "(", "env", ",", "raw", ",", "cookiecutter_dict", ")", "if", "not", "no_input", ":", "val", "=", "read_user_variable", "(", "key", ",", "val", ")", "cookiecutter_dict", "[", "key", "]", "=", "val", "except", "UndefinedError", "as", "err", ":", "msg", "=", "\"Unable to render variable '{}'\"", ".", "format", "(", "key", ")", "raise", "UndefinedVariableInTemplate", "(", "msg", ",", "err", ",", "context", ")", "# Second pass; handle the dictionaries.", "for", "key", ",", "raw", "in", "iteritems", "(", "context", "[", "u'cookiecutter'", "]", ")", ":", "try", ":", "if", "isinstance", "(", "raw", ",", "dict", ")", ":", "# We are dealing with a dict variable", "val", "=", "render_variable", "(", "env", ",", "raw", ",", "cookiecutter_dict", ")", "if", "not", "no_input", ":", "val", "=", "read_user_dict", "(", "key", ",", "val", ")", "cookiecutter_dict", "[", "key", "]", "=", "val", "except", "UndefinedError", "as", "err", ":", "msg", "=", "\"Unable to render variable '{}'\"", ".", "format", "(", "key", ")", "raise", "UndefinedVariableInTemplate", "(", "msg", ",", "err", ",", "context", ")", "return", "cookiecutter_dict" ]
Prompts the user to enter new config, using context as a source for the field names and sample values. :param no_input: Prompt the user at command line for manual configuration?
[ "Prompts", "the", "user", "to", "enter", "new", "config", "using", "context", "as", "a", "source", "for", "the", "field", "names", "and", "sample", "values", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/prompt.py#L189-L242
train
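A sketch of a full non-interactive pass; note how the list variable collapses to its first choice and the underscore-prefixed key passes through unrendered:

from cookiecutter.prompt import prompt_for_config

context = {'cookiecutter': {
    'project_name': 'demo',
    'license': ['MIT', 'BSD-3'],
    '_copy_without_render': ['*.html'],
}}
result = prompt_for_config(context, no_input=True)
# OrderedDict([('project_name', 'demo'), ('license', 'MIT'),
#              ('_copy_without_render', ['*.html'])])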
audreyr/cookiecutter
cookiecutter/environment.py
ExtensionLoaderMixin._read_extensions
def _read_extensions(self, context): """Return list of extensions as str to be passed on to the Jinja2 env. If context does not contain the relevant info, return an empty list instead. """ try: extensions = context['cookiecutter']['_extensions'] except KeyError: return [] else: return [str(ext) for ext in extensions]
python
def _read_extensions(self, context): """Return list of extensions as str to be passed on to the Jinja2 env. If context does not contain the relevant info, return an empty list instead. """ try: extensions = context['cookiecutter']['_extensions'] except KeyError: return [] else: return [str(ext) for ext in extensions]
[ "def", "_read_extensions", "(", "self", ",", "context", ")", ":", "try", ":", "extensions", "=", "context", "[", "'cookiecutter'", "]", "[", "'_extensions'", "]", "except", "KeyError", ":", "return", "[", "]", "else", ":", "return", "[", "str", "(", "ext", ")", "for", "ext", "in", "extensions", "]" ]
Return list of extensions as str to be passed on to the Jinja2 env. If context does not contain the relevant info, return an empty list instead.
[ "Return", "list", "of", "extensions", "as", "str", "to", "be", "passed", "on", "to", "the", "Jinja2", "env", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/environment.py#L42-L53
train
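The context shape this mixin method looks for; the extension name is only illustrative:

# Templates opt in to extra Jinja2 extensions through a '_extensions' key:
context = {'cookiecutter': {'_extensions': ['jinja2_time.TimeExtension']}}
# _read_extensions(context) -> ['jinja2_time.TimeExtension']
# A context without that key simply yields [].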
audreyr/cookiecutter
cookiecutter/log.py
configure_logger
def configure_logger(stream_level='DEBUG', debug_file=None): """Configure logging for cookiecutter. Set up logging to stdout with given level. If ``debug_file`` is given set up logging to file with DEBUG level. """ # Set up 'cookiecutter' logger logger = logging.getLogger('cookiecutter') logger.setLevel(logging.DEBUG) # Remove all attached handlers, in case there was # a logger with using the name 'cookiecutter' del logger.handlers[:] # Create a file handler if a log file is provided if debug_file is not None: debug_formatter = logging.Formatter(LOG_FORMATS['DEBUG']) file_handler = logging.FileHandler(debug_file) file_handler.setLevel(LOG_LEVELS['DEBUG']) file_handler.setFormatter(debug_formatter) logger.addHandler(file_handler) # Get settings based on the given stream_level log_formatter = logging.Formatter(LOG_FORMATS[stream_level]) log_level = LOG_LEVELS[stream_level] # Create a stream handler stream_handler = logging.StreamHandler(stream=sys.stdout) stream_handler.setLevel(log_level) stream_handler.setFormatter(log_formatter) logger.addHandler(stream_handler) return logger
python
def configure_logger(stream_level='DEBUG', debug_file=None): """Configure logging for cookiecutter. Set up logging to stdout with given level. If ``debug_file`` is given set up logging to file with DEBUG level. """ # Set up 'cookiecutter' logger logger = logging.getLogger('cookiecutter') logger.setLevel(logging.DEBUG) # Remove all attached handlers, in case there was # a logger with using the name 'cookiecutter' del logger.handlers[:] # Create a file handler if a log file is provided if debug_file is not None: debug_formatter = logging.Formatter(LOG_FORMATS['DEBUG']) file_handler = logging.FileHandler(debug_file) file_handler.setLevel(LOG_LEVELS['DEBUG']) file_handler.setFormatter(debug_formatter) logger.addHandler(file_handler) # Get settings based on the given stream_level log_formatter = logging.Formatter(LOG_FORMATS[stream_level]) log_level = LOG_LEVELS[stream_level] # Create a stream handler stream_handler = logging.StreamHandler(stream=sys.stdout) stream_handler.setLevel(log_level) stream_handler.setFormatter(log_formatter) logger.addHandler(stream_handler) return logger
[ "def", "configure_logger", "(", "stream_level", "=", "'DEBUG'", ",", "debug_file", "=", "None", ")", ":", "# Set up 'cookiecutter' logger", "logger", "=", "logging", ".", "getLogger", "(", "'cookiecutter'", ")", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "# Remove all attached handlers, in case there was", "# a logger with using the name 'cookiecutter'", "del", "logger", ".", "handlers", "[", ":", "]", "# Create a file handler if a log file is provided", "if", "debug_file", "is", "not", "None", ":", "debug_formatter", "=", "logging", ".", "Formatter", "(", "LOG_FORMATS", "[", "'DEBUG'", "]", ")", "file_handler", "=", "logging", ".", "FileHandler", "(", "debug_file", ")", "file_handler", ".", "setLevel", "(", "LOG_LEVELS", "[", "'DEBUG'", "]", ")", "file_handler", ".", "setFormatter", "(", "debug_formatter", ")", "logger", ".", "addHandler", "(", "file_handler", ")", "# Get settings based on the given stream_level", "log_formatter", "=", "logging", ".", "Formatter", "(", "LOG_FORMATS", "[", "stream_level", "]", ")", "log_level", "=", "LOG_LEVELS", "[", "stream_level", "]", "# Create a stream handler", "stream_handler", "=", "logging", ".", "StreamHandler", "(", "stream", "=", "sys", ".", "stdout", ")", "stream_handler", ".", "setLevel", "(", "log_level", ")", "stream_handler", ".", "setFormatter", "(", "log_formatter", ")", "logger", ".", "addHandler", "(", "stream_handler", ")", "return", "logger" ]
Configure logging for cookiecutter. Set up logging to stdout with the given level. If ``debug_file`` is given, set up logging to a file at DEBUG level.
[ "Configure", "logging", "for", "cookiecutter", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/log.py#L22-L54
train
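A usage sketch; the log file name is arbitrary:

from cookiecutter.log import configure_logger

logger = configure_logger(stream_level='INFO', debug_file='cc-debug.log')
logger.info('shown on stdout and written to cc-debug.log')
logger.debug('suppressed on stdout, but captured in cc-debug.log')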
audreyr/cookiecutter
cookiecutter/vcs.py
identify_repo
def identify_repo(repo_url): """Determine if `repo_url` should be treated as a URL to a git or hg repo. Repos can be identified by prepending "hg+" or "git+" to the repo URL. :param repo_url: Repo URL of unknown type. :returns: ('git', repo_url), ('hg', repo_url), or None. """ repo_url_values = repo_url.split('+') if len(repo_url_values) == 2: repo_type = repo_url_values[0] if repo_type in ["git", "hg"]: return repo_type, repo_url_values[1] else: raise UnknownRepoType else: if 'git' in repo_url: return 'git', repo_url elif 'bitbucket' in repo_url: return 'hg', repo_url else: raise UnknownRepoType
python
def identify_repo(repo_url): """Determine if `repo_url` should be treated as a URL to a git or hg repo. Repos can be identified by prepending "hg+" or "git+" to the repo URL. :param repo_url: Repo URL of unknown type. :returns: ('git', repo_url), ('hg', repo_url), or None. """ repo_url_values = repo_url.split('+') if len(repo_url_values) == 2: repo_type = repo_url_values[0] if repo_type in ["git", "hg"]: return repo_type, repo_url_values[1] else: raise UnknownRepoType else: if 'git' in repo_url: return 'git', repo_url elif 'bitbucket' in repo_url: return 'hg', repo_url else: raise UnknownRepoType
[ "def", "identify_repo", "(", "repo_url", ")", ":", "repo_url_values", "=", "repo_url", ".", "split", "(", "'+'", ")", "if", "len", "(", "repo_url_values", ")", "==", "2", ":", "repo_type", "=", "repo_url_values", "[", "0", "]", "if", "repo_type", "in", "[", "\"git\"", ",", "\"hg\"", "]", ":", "return", "repo_type", ",", "repo_url_values", "[", "1", "]", "else", ":", "raise", "UnknownRepoType", "else", ":", "if", "'git'", "in", "repo_url", ":", "return", "'git'", ",", "repo_url", "elif", "'bitbucket'", "in", "repo_url", ":", "return", "'hg'", ",", "repo_url", "else", ":", "raise", "UnknownRepoType" ]
Determine if `repo_url` should be treated as a URL to a git or hg repo. Repos can be identified by prepending "hg+" or "git+" to the repo URL. :param repo_url: Repo URL of unknown type. :returns: ('git', repo_url) or ('hg', repo_url); raises UnknownRepoType if the type cannot be determined.
[ "Determine", "if", "repo_url", "should", "be", "treated", "as", "a", "URL", "to", "a", "git", "or", "hg", "repo", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/vcs.py#L26-L47
train
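A few illustrative inputs and what the heuristics above make of them:

from cookiecutter.vcs import identify_repo

identify_repo('git+https://example.com/u/tpl')  # ('git', 'https://example.com/u/tpl')
identify_repo('hg+https://example.com/u/tpl')   # ('hg', 'https://example.com/u/tpl')
identify_repo('https://github.com/u/tpl.git')   # ('git', ...) via the 'git' substring
identify_repo('https://example.com/u/tpl')      # raises UnknownRepoType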
audreyr/cookiecutter
cookiecutter/vcs.py
clone
def clone(repo_url, checkout=None, clone_to_dir='.', no_input=False): """Clone a repo to the current directory. :param repo_url: Repo URL of unknown type. :param checkout: The branch, tag or commit ID to checkout after clone. :param clone_to_dir: The directory to clone to. Defaults to the current directory. :param no_input: Suppress all user prompts when calling via API. """ # Ensure that clone_to_dir exists clone_to_dir = os.path.expanduser(clone_to_dir) make_sure_path_exists(clone_to_dir) # identify the repo_type repo_type, repo_url = identify_repo(repo_url) # check that the appropriate VCS for the repo_type is installed if not is_vcs_installed(repo_type): msg = "'{0}' is not installed.".format(repo_type) raise VCSNotInstalled(msg) repo_url = repo_url.rstrip('/') tail = os.path.split(repo_url)[1] if repo_type == 'git': repo_dir = os.path.normpath(os.path.join(clone_to_dir, tail.rsplit('.git')[0])) elif repo_type == 'hg': repo_dir = os.path.normpath(os.path.join(clone_to_dir, tail)) logger.debug('repo_dir is {0}'.format(repo_dir)) if os.path.isdir(repo_dir): clone = prompt_and_delete(repo_dir, no_input=no_input) else: clone = True if clone: try: subprocess.check_output( [repo_type, 'clone', repo_url], cwd=clone_to_dir, stderr=subprocess.STDOUT, ) if checkout is not None: subprocess.check_output( [repo_type, 'checkout', checkout], cwd=repo_dir, stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError as clone_error: output = clone_error.output.decode('utf-8') if 'not found' in output.lower(): raise RepositoryNotFound( 'The repository {} could not be found, ' 'have you made a typo?'.format(repo_url) ) if any(error in output for error in BRANCH_ERRORS): raise RepositoryCloneFailed( 'The {} branch of repository {} could not found, ' 'have you made a typo?'.format(checkout, repo_url) ) raise return repo_dir
python
def clone(repo_url, checkout=None, clone_to_dir='.', no_input=False): """Clone a repo to the current directory. :param repo_url: Repo URL of unknown type. :param checkout: The branch, tag or commit ID to checkout after clone. :param clone_to_dir: The directory to clone to. Defaults to the current directory. :param no_input: Suppress all user prompts when calling via API. """ # Ensure that clone_to_dir exists clone_to_dir = os.path.expanduser(clone_to_dir) make_sure_path_exists(clone_to_dir) # identify the repo_type repo_type, repo_url = identify_repo(repo_url) # check that the appropriate VCS for the repo_type is installed if not is_vcs_installed(repo_type): msg = "'{0}' is not installed.".format(repo_type) raise VCSNotInstalled(msg) repo_url = repo_url.rstrip('/') tail = os.path.split(repo_url)[1] if repo_type == 'git': repo_dir = os.path.normpath(os.path.join(clone_to_dir, tail.rsplit('.git')[0])) elif repo_type == 'hg': repo_dir = os.path.normpath(os.path.join(clone_to_dir, tail)) logger.debug('repo_dir is {0}'.format(repo_dir)) if os.path.isdir(repo_dir): clone = prompt_and_delete(repo_dir, no_input=no_input) else: clone = True if clone: try: subprocess.check_output( [repo_type, 'clone', repo_url], cwd=clone_to_dir, stderr=subprocess.STDOUT, ) if checkout is not None: subprocess.check_output( [repo_type, 'checkout', checkout], cwd=repo_dir, stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError as clone_error: output = clone_error.output.decode('utf-8') if 'not found' in output.lower(): raise RepositoryNotFound( 'The repository {} could not be found, ' 'have you made a typo?'.format(repo_url) ) if any(error in output for error in BRANCH_ERRORS): raise RepositoryCloneFailed( 'The {} branch of repository {} could not found, ' 'have you made a typo?'.format(checkout, repo_url) ) raise return repo_dir
[ "def", "clone", "(", "repo_url", ",", "checkout", "=", "None", ",", "clone_to_dir", "=", "'.'", ",", "no_input", "=", "False", ")", ":", "# Ensure that clone_to_dir exists", "clone_to_dir", "=", "os", ".", "path", ".", "expanduser", "(", "clone_to_dir", ")", "make_sure_path_exists", "(", "clone_to_dir", ")", "# identify the repo_type", "repo_type", ",", "repo_url", "=", "identify_repo", "(", "repo_url", ")", "# check that the appropriate VCS for the repo_type is installed", "if", "not", "is_vcs_installed", "(", "repo_type", ")", ":", "msg", "=", "\"'{0}' is not installed.\"", ".", "format", "(", "repo_type", ")", "raise", "VCSNotInstalled", "(", "msg", ")", "repo_url", "=", "repo_url", ".", "rstrip", "(", "'/'", ")", "tail", "=", "os", ".", "path", ".", "split", "(", "repo_url", ")", "[", "1", "]", "if", "repo_type", "==", "'git'", ":", "repo_dir", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "clone_to_dir", ",", "tail", ".", "rsplit", "(", "'.git'", ")", "[", "0", "]", ")", ")", "elif", "repo_type", "==", "'hg'", ":", "repo_dir", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "clone_to_dir", ",", "tail", ")", ")", "logger", ".", "debug", "(", "'repo_dir is {0}'", ".", "format", "(", "repo_dir", ")", ")", "if", "os", ".", "path", ".", "isdir", "(", "repo_dir", ")", ":", "clone", "=", "prompt_and_delete", "(", "repo_dir", ",", "no_input", "=", "no_input", ")", "else", ":", "clone", "=", "True", "if", "clone", ":", "try", ":", "subprocess", ".", "check_output", "(", "[", "repo_type", ",", "'clone'", ",", "repo_url", "]", ",", "cwd", "=", "clone_to_dir", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", ")", "if", "checkout", "is", "not", "None", ":", "subprocess", ".", "check_output", "(", "[", "repo_type", ",", "'checkout'", ",", "checkout", "]", ",", "cwd", "=", "repo_dir", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", ")", "except", "subprocess", ".", "CalledProcessError", "as", "clone_error", ":", "output", "=", "clone_error", ".", "output", ".", "decode", "(", "'utf-8'", ")", "if", "'not found'", "in", "output", ".", "lower", "(", ")", ":", "raise", "RepositoryNotFound", "(", "'The repository {} could not be found, '", "'have you made a typo?'", ".", "format", "(", "repo_url", ")", ")", "if", "any", "(", "error", "in", "output", "for", "error", "in", "BRANCH_ERRORS", ")", ":", "raise", "RepositoryCloneFailed", "(", "'The {} branch of repository {} could not found, '", "'have you made a typo?'", ".", "format", "(", "checkout", ",", "repo_url", ")", ")", "raise", "return", "repo_dir" ]
Clone a repo to the current directory. :param repo_url: Repo URL of unknown type. :param checkout: The branch, tag or commit ID to checkout after clone. :param clone_to_dir: The directory to clone to. Defaults to the current directory. :param no_input: Suppress all user prompts when calling via API.
[ "Clone", "a", "repo", "to", "the", "current", "directory", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/vcs.py#L59-L121
train
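A sketch of a clone call; the URL and target directory are placeholders:

from cookiecutter.vcs import clone

repo_dir = clone(
    'git+https://github.com/audreyr/cookiecutter-pypackage.git',
    checkout='master',
    clone_to_dir='~/.cookiecutters',
    no_input=True,
)
# repo_dir ends in '.../cookiecutter-pypackage': the '.git' tail is
# stripped via tail.rsplit('.git')[0] before joining with clone_to_dir.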
audreyr/cookiecutter
cookiecutter/hooks.py
valid_hook
def valid_hook(hook_file, hook_name): """Determine if a hook file is valid. :param hook_file: The hook file to consider for validity :param hook_name: The hook to find :return: The hook file validity """ filename = os.path.basename(hook_file) basename = os.path.splitext(filename)[0] matching_hook = basename == hook_name supported_hook = basename in _HOOKS backup_file = filename.endswith('~') return matching_hook and supported_hook and not backup_file
python
def valid_hook(hook_file, hook_name): """Determine if a hook file is valid. :param hook_file: The hook file to consider for validity :param hook_name: The hook to find :return: The hook file validity """ filename = os.path.basename(hook_file) basename = os.path.splitext(filename)[0] matching_hook = basename == hook_name supported_hook = basename in _HOOKS backup_file = filename.endswith('~') return matching_hook and supported_hook and not backup_file
[ "def", "valid_hook", "(", "hook_file", ",", "hook_name", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "hook_file", ")", "basename", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "matching_hook", "=", "basename", "==", "hook_name", "supported_hook", "=", "basename", "in", "_HOOKS", "backup_file", "=", "filename", ".", "endswith", "(", "'~'", ")", "return", "matching_hook", "and", "supported_hook", "and", "not", "backup_file" ]
Determine if a hook file is valid. :param hook_file: The hook file to consider for validity :param hook_name: The hook to find :return: The hook file validity
[ "Determine", "if", "a", "hook", "file", "is", "valid", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/hooks.py#L26-L40
train
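Illustrative checks, assuming _HOOKS contains the usual 'pre_gen_project' and 'post_gen_project' names:

from cookiecutter.hooks import valid_hook

valid_hook('pre_gen_project.py', 'pre_gen_project')    # True
valid_hook('pre_gen_project.py~', 'pre_gen_project')   # False: editor backup file
valid_hook('post_gen_project.py', 'pre_gen_project')   # False: name mismatch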
audreyr/cookiecutter
cookiecutter/hooks.py
find_hook
def find_hook(hook_name, hooks_dir='hooks'): """Return a dict of all hook scripts provided. Must be called with the project template as the current working directory. Dict's key will be the hook/script's name, without extension, while values will be the absolute path to the script. Missing scripts will not be included in the returned dict. :param hook_name: The hook to find :param hooks_dir: The hook directory in the template :return: The absolute path to the hook script or None """ logger.debug('hooks_dir is {}'.format(os.path.abspath(hooks_dir))) if not os.path.isdir(hooks_dir): logger.debug('No hooks/ dir in template_dir') return None for hook_file in os.listdir(hooks_dir): if valid_hook(hook_file, hook_name): return os.path.abspath(os.path.join(hooks_dir, hook_file)) return None
python
def find_hook(hook_name, hooks_dir='hooks'): """Return a dict of all hook scripts provided. Must be called with the project template as the current working directory. Dict's key will be the hook/script's name, without extension, while values will be the absolute path to the script. Missing scripts will not be included in the returned dict. :param hook_name: The hook to find :param hooks_dir: The hook directory in the template :return: The absolute path to the hook script or None """ logger.debug('hooks_dir is {}'.format(os.path.abspath(hooks_dir))) if not os.path.isdir(hooks_dir): logger.debug('No hooks/ dir in template_dir') return None for hook_file in os.listdir(hooks_dir): if valid_hook(hook_file, hook_name): return os.path.abspath(os.path.join(hooks_dir, hook_file)) return None
[ "def", "find_hook", "(", "hook_name", ",", "hooks_dir", "=", "'hooks'", ")", ":", "logger", ".", "debug", "(", "'hooks_dir is {}'", ".", "format", "(", "os", ".", "path", ".", "abspath", "(", "hooks_dir", ")", ")", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "hooks_dir", ")", ":", "logger", ".", "debug", "(", "'No hooks/ dir in template_dir'", ")", "return", "None", "for", "hook_file", "in", "os", ".", "listdir", "(", "hooks_dir", ")", ":", "if", "valid_hook", "(", "hook_file", ",", "hook_name", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "hooks_dir", ",", "hook_file", ")", ")", "return", "None" ]
Return the absolute path of the hook script, if one is provided. Must be called with the project template as the current working directory. Returns None if the hooks directory is missing or no script matches ``hook_name``. :param hook_name: The hook to find :param hooks_dir: The hook directory in the template :return: The absolute path to the hook script or None
[ "Return", "a", "dict", "of", "all", "hook", "scripts", "provided", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/hooks.py#L43-L65
train
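A lookup sketch; note the current-working-directory requirement from the docstring:

import os

from cookiecutter.hooks import find_hook

os.chdir('/path/to/template')  # hypothetical template root containing hooks/
script = find_hook('post_gen_project')
# script is an absolute path such as
# '/path/to/template/hooks/post_gen_project.py', or None if nothing matches.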
audreyr/cookiecutter
cookiecutter/hooks.py
run_script
def run_script(script_path, cwd='.'): """Execute a script from a working directory. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. """ run_thru_shell = sys.platform.startswith('win') if script_path.endswith('.py'): script_command = [sys.executable, script_path] else: script_command = [script_path] utils.make_executable(script_path) try: proc = subprocess.Popen( script_command, shell=run_thru_shell, cwd=cwd ) exit_status = proc.wait() if exit_status != EXIT_SUCCESS: raise FailedHookException( 'Hook script failed (exit status: {})'.format(exit_status) ) except OSError as os_error: if os_error.errno == errno.ENOEXEC: raise FailedHookException( 'Hook script failed, might be an ' 'empty file or missing a shebang' ) raise FailedHookException( 'Hook script failed (error: {})'.format(os_error) )
python
def run_script(script_path, cwd='.'): """Execute a script from a working directory. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. """ run_thru_shell = sys.platform.startswith('win') if script_path.endswith('.py'): script_command = [sys.executable, script_path] else: script_command = [script_path] utils.make_executable(script_path) try: proc = subprocess.Popen( script_command, shell=run_thru_shell, cwd=cwd ) exit_status = proc.wait() if exit_status != EXIT_SUCCESS: raise FailedHookException( 'Hook script failed (exit status: {})'.format(exit_status) ) except OSError as os_error: if os_error.errno == errno.ENOEXEC: raise FailedHookException( 'Hook script failed, might be an ' 'empty file or missing a shebang' ) raise FailedHookException( 'Hook script failed (error: {})'.format(os_error) )
[ "def", "run_script", "(", "script_path", ",", "cwd", "=", "'.'", ")", ":", "run_thru_shell", "=", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", "if", "script_path", ".", "endswith", "(", "'.py'", ")", ":", "script_command", "=", "[", "sys", ".", "executable", ",", "script_path", "]", "else", ":", "script_command", "=", "[", "script_path", "]", "utils", ".", "make_executable", "(", "script_path", ")", "try", ":", "proc", "=", "subprocess", ".", "Popen", "(", "script_command", ",", "shell", "=", "run_thru_shell", ",", "cwd", "=", "cwd", ")", "exit_status", "=", "proc", ".", "wait", "(", ")", "if", "exit_status", "!=", "EXIT_SUCCESS", ":", "raise", "FailedHookException", "(", "'Hook script failed (exit status: {})'", ".", "format", "(", "exit_status", ")", ")", "except", "OSError", "as", "os_error", ":", "if", "os_error", ".", "errno", "==", "errno", ".", "ENOEXEC", ":", "raise", "FailedHookException", "(", "'Hook script failed, might be an '", "'empty file or missing a shebang'", ")", "raise", "FailedHookException", "(", "'Hook script failed (error: {})'", ".", "format", "(", "os_error", ")", ")" ]
Execute a script from a working directory. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from.
[ "Execute", "a", "script", "from", "a", "working", "directory", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/hooks.py#L68-L101
train
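A sketch of running a hook script directly; both paths are placeholders:

from cookiecutter.hooks import run_script

# '.py' scripts run under the current interpreter; any other script is
# made executable and invoked as-is (via the shell on Windows).
run_script('/abs/template/hooks/post_gen_project.py',
           cwd='/abs/output/generated-project')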
audreyr/cookiecutter
cookiecutter/hooks.py
run_script_with_context
def run_script_with_context(script_path, cwd, context): """Execute a script after rendering it with Jinja. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. :param context: Cookiecutter project template context. """ _, extension = os.path.splitext(script_path) contents = io.open(script_path, 'r', encoding='utf-8').read() with tempfile.NamedTemporaryFile( delete=False, mode='wb', suffix=extension ) as temp: env = StrictEnvironment( context=context, keep_trailing_newline=True, ) template = env.from_string(contents) output = template.render(**context) temp.write(output.encode('utf-8')) run_script(temp.name, cwd)
python
def run_script_with_context(script_path, cwd, context): """Execute a script after rendering it with Jinja. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. :param context: Cookiecutter project template context. """ _, extension = os.path.splitext(script_path) contents = io.open(script_path, 'r', encoding='utf-8').read() with tempfile.NamedTemporaryFile( delete=False, mode='wb', suffix=extension ) as temp: env = StrictEnvironment( context=context, keep_trailing_newline=True, ) template = env.from_string(contents) output = template.render(**context) temp.write(output.encode('utf-8')) run_script(temp.name, cwd)
[ "def", "run_script_with_context", "(", "script_path", ",", "cwd", ",", "context", ")", ":", "_", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "script_path", ")", "contents", "=", "io", ".", "open", "(", "script_path", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", ".", "read", "(", ")", "with", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "mode", "=", "'wb'", ",", "suffix", "=", "extension", ")", "as", "temp", ":", "env", "=", "StrictEnvironment", "(", "context", "=", "context", ",", "keep_trailing_newline", "=", "True", ",", ")", "template", "=", "env", ".", "from_string", "(", "contents", ")", "output", "=", "template", ".", "render", "(", "*", "*", "context", ")", "temp", ".", "write", "(", "output", ".", "encode", "(", "'utf-8'", ")", ")", "run_script", "(", "temp", ".", "name", ",", "cwd", ")" ]
Execute a script after rendering it with Jinja. :param script_path: Absolute path to the script to run. :param cwd: The directory to run the script from. :param context: Cookiecutter project template context.
[ "Execute", "a", "script", "after", "rendering", "it", "with", "Jinja", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/hooks.py#L104-L128
train
audreyr/cookiecutter
cookiecutter/hooks.py
run_hook
def run_hook(hook_name, project_dir, context): """ Try to find and execute a hook from the specified project directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. """ script = find_hook(hook_name) if script is None: logger.debug('No {} hook found'.format(hook_name)) return logger.debug('Running hook {}'.format(hook_name)) run_script_with_context(script, project_dir, context)
python
def run_hook(hook_name, project_dir, context): """ Try to find and execute a hook from the specified project directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context. """ script = find_hook(hook_name) if script is None: logger.debug('No {} hook found'.format(hook_name)) return logger.debug('Running hook {}'.format(hook_name)) run_script_with_context(script, project_dir, context)
[ "def", "run_hook", "(", "hook_name", ",", "project_dir", ",", "context", ")", ":", "script", "=", "find_hook", "(", "hook_name", ")", "if", "script", "is", "None", ":", "logger", ".", "debug", "(", "'No {} hook found'", ".", "format", "(", "hook_name", ")", ")", "return", "logger", ".", "debug", "(", "'Running hook {}'", ".", "format", "(", "hook_name", ")", ")", "run_script_with_context", "(", "script", ",", "project_dir", ",", "context", ")" ]
Try to find and execute a hook from the specified project directory. :param hook_name: The hook to execute. :param project_dir: The directory to execute the script from. :param context: Cookiecutter project context.
[ "Try", "to", "find", "and", "execute", "a", "hook", "from", "the", "specified", "project", "directory", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/hooks.py#L131-L144
train
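A hedged example of calling run_hook directly; 'pre_gen_project' is one of cookiecutter's standard hook names, while the directory and context values are made up for illustration:

from cookiecutter.hooks import run_hook

# Finds hooks/pre_gen_project.* and executes it with the given context;
# returns silently when no such hook exists.
run_hook(
    'pre_gen_project',
    project_dir='/tmp/rendered-project',
    context={'cookiecutter': {'project_slug': 'demo'}},
)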
audreyr/cookiecutter
cookiecutter/cli.py
version_msg
def version_msg(): """Return the Cookiecutter version, location and Python powering it.""" python_version = sys.version[:3] location = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) message = u'Cookiecutter %(version)s from {} (Python {})' return message.format(location, python_version)
python
def version_msg(): """Return the Cookiecutter version, location and Python powering it.""" python_version = sys.version[:3] location = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) message = u'Cookiecutter %(version)s from {} (Python {})' return message.format(location, python_version)
[ "def", "version_msg", "(", ")", ":", "python_version", "=", "sys", ".", "version", "[", ":", "3", "]", "location", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ")", "message", "=", "u'Cookiecutter %(version)s from {} (Python {})'", "return", "message", ".", "format", "(", "location", ",", "python_version", ")" ]
Return the Cookiecutter version, location and Python powering it.
[ "Return", "the", "Cookiecutter", "version", "location", "and", "Python", "powering", "it", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/cli.py#L27-L32
train
audreyr/cookiecutter
cookiecutter/cli.py
validate_extra_context
def validate_extra_context(ctx, param, value): """Validate extra context.""" for s in value: if '=' not in s: raise click.BadParameter( 'EXTRA_CONTEXT should contain items of the form key=value; ' "'{}' doesn't match that form".format(s) ) # Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66') # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'} return collections.OrderedDict(s.split('=', 1) for s in value) or None
python
def validate_extra_context(ctx, param, value): """Validate extra context.""" for s in value: if '=' not in s: raise click.BadParameter( 'EXTRA_CONTEXT should contain items of the form key=value; ' "'{}' doesn't match that form".format(s) ) # Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66') # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'} return collections.OrderedDict(s.split('=', 1) for s in value) or None
[ "def", "validate_extra_context", "(", "ctx", ",", "param", ",", "value", ")", ":", "for", "s", "in", "value", ":", "if", "'='", "not", "in", "s", ":", "raise", "click", ".", "BadParameter", "(", "'EXTRA_CONTEXT should contain items of the form key=value; '", "\"'{}' doesn't match that form\"", ".", "format", "(", "s", ")", ")", "# Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66')", "# to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'}", "return", "collections", ".", "OrderedDict", "(", "s", ".", "split", "(", "'='", ",", "1", ")", "for", "s", "in", "value", ")", "or", "None" ]
Validate extra context.
[ "Validate", "extra", "context", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/cli.py#L35-L46
train
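Because ctx and param are unused by the function body, the conversion can be exercised standalone; a quick sketch:

from cookiecutter.cli import validate_extra_context

# Tuples of 'key=value' strings become an OrderedDict; malformed items
# raise click.BadParameter instead.
result = validate_extra_context(None, None, ('program_name=foobar', 'startsecs=66'))
print(result)  # OrderedDict([('program_name', 'foobar'), ('startsecs', '66')])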
audreyr/cookiecutter
cookiecutter/cli.py
main
def main( template, extra_context, no_input, checkout, verbose, replay, overwrite_if_exists, output_dir, config_file, default_config, debug_file): """Create a project from a Cookiecutter project template (TEMPLATE). Cookiecutter is free and open source software, developed and managed by volunteers. If you would like to help out or fund the project, please get in touch at https://github.com/audreyr/cookiecutter. """ # If you _need_ to support a local template in a directory # called 'help', use a qualified path to the directory. if template == u'help': click.echo(click.get_current_context().get_help()) sys.exit(0) configure_logger( stream_level='DEBUG' if verbose else 'INFO', debug_file=debug_file, ) try: cookiecutter( template, checkout, no_input, extra_context=extra_context, replay=replay, overwrite_if_exists=overwrite_if_exists, output_dir=output_dir, config_file=config_file, default_config=default_config, password=os.environ.get('COOKIECUTTER_REPO_PASSWORD') ) except (OutputDirExistsException, InvalidModeException, FailedHookException, UnknownExtension, InvalidZipRepository, RepositoryNotFound, RepositoryCloneFailed) as e: click.echo(e) sys.exit(1) except UndefinedVariableInTemplate as undefined_err: click.echo('{}'.format(undefined_err.message)) click.echo('Error message: {}'.format(undefined_err.error.message)) context_str = json.dumps( undefined_err.context, indent=4, sort_keys=True ) click.echo('Context: {}'.format(context_str)) sys.exit(1)
python
def main( template, extra_context, no_input, checkout, verbose, replay, overwrite_if_exists, output_dir, config_file, default_config, debug_file): """Create a project from a Cookiecutter project template (TEMPLATE). Cookiecutter is free and open source software, developed and managed by volunteers. If you would like to help out or fund the project, please get in touch at https://github.com/audreyr/cookiecutter. """ # If you _need_ to support a local template in a directory # called 'help', use a qualified path to the directory. if template == u'help': click.echo(click.get_current_context().get_help()) sys.exit(0) configure_logger( stream_level='DEBUG' if verbose else 'INFO', debug_file=debug_file, ) try: cookiecutter( template, checkout, no_input, extra_context=extra_context, replay=replay, overwrite_if_exists=overwrite_if_exists, output_dir=output_dir, config_file=config_file, default_config=default_config, password=os.environ.get('COOKIECUTTER_REPO_PASSWORD') ) except (OutputDirExistsException, InvalidModeException, FailedHookException, UnknownExtension, InvalidZipRepository, RepositoryNotFound, RepositoryCloneFailed) as e: click.echo(e) sys.exit(1) except UndefinedVariableInTemplate as undefined_err: click.echo('{}'.format(undefined_err.message)) click.echo('Error message: {}'.format(undefined_err.error.message)) context_str = json.dumps( undefined_err.context, indent=4, sort_keys=True ) click.echo('Context: {}'.format(context_str)) sys.exit(1)
[ "def", "main", "(", "template", ",", "extra_context", ",", "no_input", ",", "checkout", ",", "verbose", ",", "replay", ",", "overwrite_if_exists", ",", "output_dir", ",", "config_file", ",", "default_config", ",", "debug_file", ")", ":", "# If you _need_ to support a local template in a directory", "# called 'help', use a qualified path to the directory.", "if", "template", "==", "u'help'", ":", "click", ".", "echo", "(", "click", ".", "get_current_context", "(", ")", ".", "get_help", "(", ")", ")", "sys", ".", "exit", "(", "0", ")", "configure_logger", "(", "stream_level", "=", "'DEBUG'", "if", "verbose", "else", "'INFO'", ",", "debug_file", "=", "debug_file", ",", ")", "try", ":", "cookiecutter", "(", "template", ",", "checkout", ",", "no_input", ",", "extra_context", "=", "extra_context", ",", "replay", "=", "replay", ",", "overwrite_if_exists", "=", "overwrite_if_exists", ",", "output_dir", "=", "output_dir", ",", "config_file", "=", "config_file", ",", "default_config", "=", "default_config", ",", "password", "=", "os", ".", "environ", ".", "get", "(", "'COOKIECUTTER_REPO_PASSWORD'", ")", ")", "except", "(", "OutputDirExistsException", ",", "InvalidModeException", ",", "FailedHookException", ",", "UnknownExtension", ",", "InvalidZipRepository", ",", "RepositoryNotFound", ",", "RepositoryCloneFailed", ")", "as", "e", ":", "click", ".", "echo", "(", "e", ")", "sys", ".", "exit", "(", "1", ")", "except", "UndefinedVariableInTemplate", "as", "undefined_err", ":", "click", ".", "echo", "(", "'{}'", ".", "format", "(", "undefined_err", ".", "message", ")", ")", "click", ".", "echo", "(", "'Error message: {}'", ".", "format", "(", "undefined_err", ".", "error", ".", "message", ")", ")", "context_str", "=", "json", ".", "dumps", "(", "undefined_err", ".", "context", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", "click", ".", "echo", "(", "'Context: {}'", ".", "format", "(", "context_str", ")", ")", "sys", ".", "exit", "(", "1", ")" ]
Create a project from a Cookiecutter project template (TEMPLATE). Cookiecutter is free and open source software, developed and managed by volunteers. If you would like to help out or fund the project, please get in touch at https://github.com/audreyr/cookiecutter.
[ "Create", "a", "project", "from", "a", "Cookiecutter", "project", "template", "(", "TEMPLATE", ")", "." ]
3bc7b987e4ae9dcee996ae0b00375c1325b8d866
https://github.com/audreyr/cookiecutter/blob/3bc7b987e4ae9dcee996ae0b00375c1325b8d866/cookiecutter/cli.py#L91-L142
train
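A sketch of invoking the command in-process with click's test runner; passing the literal template name 'help' exercises the early help-and-exit branch shown above:

from click.testing import CliRunner
from cookiecutter.cli import main

runner = CliRunner()
result = runner.invoke(main, ['help'])  # template == 'help' prints help, exits 0
print(result.exit_code)  # 0
print(result.output)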
graphql-python/graphene
graphene/types/mountedtype.py
MountedType.mounted
def mounted(cls, unmounted): # noqa: N802 """ Mount the UnmountedType instance """ assert isinstance(unmounted, UnmountedType), ("{} can't mount {}").format( cls.__name__, repr(unmounted) ) return cls( unmounted.get_type(), *unmounted.args, _creation_counter=unmounted.creation_counter, **unmounted.kwargs )
python
def mounted(cls, unmounted): # noqa: N802 """ Mount the UnmountedType instance """ assert isinstance(unmounted, UnmountedType), ("{} can't mount {}").format( cls.__name__, repr(unmounted) ) return cls( unmounted.get_type(), *unmounted.args, _creation_counter=unmounted.creation_counter, **unmounted.kwargs )
[ "def", "mounted", "(", "cls", ",", "unmounted", ")", ":", "# noqa: N802", "assert", "isinstance", "(", "unmounted", ",", "UnmountedType", ")", ",", "(", "\"{} can't mount {}\"", ")", ".", "format", "(", "cls", ".", "__name__", ",", "repr", "(", "unmounted", ")", ")", "return", "cls", "(", "unmounted", ".", "get_type", "(", ")", ",", "*", "unmounted", ".", "args", ",", "_creation_counter", "=", "unmounted", ".", "creation_counter", ",", "*", "*", "unmounted", ".", "kwargs", ")" ]
Mount the UnmountedType instance
[ "Mount", "the", "UnmountedType", "instance" ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/types/mountedtype.py#L7-L20
train
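A small illustration of mounting, assuming graphene's Field (a MountedType subclass); the description kwarg is just an example:

import graphene

# String() is an UnmountedType instance; Field.mounted converts it into a
# Field, carrying over args, kwargs, and the creation counter.
unmounted = graphene.String(description='a name')
field = graphene.Field.mounted(unmounted)
print(type(field).__name__)  # Field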
graphql-python/graphene
graphene/pyutils/signature.py
Parameter.replace
def replace( self, name=_void, kind=_void, annotation=_void, default=_void, _partial_kwarg=_void, ): """Creates a customized copy of the Parameter.""" if name is _void: name = self._name if kind is _void: kind = self._kind if annotation is _void: annotation = self._annotation if default is _void: default = self._default if _partial_kwarg is _void: _partial_kwarg = self._partial_kwarg return type(self)( name, kind, default=default, annotation=annotation, _partial_kwarg=_partial_kwarg, )
python
def replace( self, name=_void, kind=_void, annotation=_void, default=_void, _partial_kwarg=_void, ): """Creates a customized copy of the Parameter.""" if name is _void: name = self._name if kind is _void: kind = self._kind if annotation is _void: annotation = self._annotation if default is _void: default = self._default if _partial_kwarg is _void: _partial_kwarg = self._partial_kwarg return type(self)( name, kind, default=default, annotation=annotation, _partial_kwarg=_partial_kwarg, )
[ "def", "replace", "(", "self", ",", "name", "=", "_void", ",", "kind", "=", "_void", ",", "annotation", "=", "_void", ",", "default", "=", "_void", ",", "_partial_kwarg", "=", "_void", ",", ")", ":", "if", "name", "is", "_void", ":", "name", "=", "self", ".", "_name", "if", "kind", "is", "_void", ":", "kind", "=", "self", ".", "_kind", "if", "annotation", "is", "_void", ":", "annotation", "=", "self", ".", "_annotation", "if", "default", "is", "_void", ":", "default", "=", "self", ".", "_default", "if", "_partial_kwarg", "is", "_void", ":", "_partial_kwarg", "=", "self", ".", "_partial_kwarg", "return", "type", "(", "self", ")", "(", "name", ",", "kind", ",", "default", "=", "default", ",", "annotation", "=", "annotation", ",", "_partial_kwarg", "=", "_partial_kwarg", ",", ")" ]
Creates a customized copy of the Parameter.
[ "Creates", "a", "customized", "copy", "of", "the", "Parameter", "." ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/pyutils/signature.py#L288-L319
train
graphql-python/graphene
graphene/pyutils/signature.py
Signature.from_function
def from_function(cls, func): """Constructs Signature for the given python function""" if not isinstance(func, types.FunctionType): raise TypeError("{!r} is not a Python function".format(func)) Parameter = cls._parameter_cls # Parameter information. func_code = func.__code__ pos_count = func_code.co_argcount arg_names = func_code.co_varnames positional = tuple(arg_names[:pos_count]) keyword_only_count = getattr(func_code, "co_kwonlyargcount", 0) keyword_only = arg_names[pos_count : (pos_count + keyword_only_count)] annotations = getattr(func, "__annotations__", {}) defaults = func.__defaults__ kwdefaults = getattr(func, "__kwdefaults__", None) if defaults: pos_default_count = len(defaults) else: pos_default_count = 0 parameters = [] # Non-keyword-only parameters w/o defaults. non_default_count = pos_count - pos_default_count for name in positional[:non_default_count]: annotation = annotations.get(name, _empty) parameters.append( Parameter(name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD) ) # ... w/ defaults. for offset, name in enumerate(positional[non_default_count:]): annotation = annotations.get(name, _empty) parameters.append( Parameter( name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD, default=defaults[offset], ) ) # *args if func_code.co_flags & 0x04: name = arg_names[pos_count + keyword_only_count] annotation = annotations.get(name, _empty) parameters.append( Parameter(name, annotation=annotation, kind=_VAR_POSITIONAL) ) # Keyword-only parameters. for name in keyword_only: default = _empty if kwdefaults is not None: default = kwdefaults.get(name, _empty) annotation = annotations.get(name, _empty) parameters.append( Parameter( name, annotation=annotation, kind=_KEYWORD_ONLY, default=default ) ) # **kwargs if func_code.co_flags & 0x08: index = pos_count + keyword_only_count if func_code.co_flags & 0x04: index += 1 name = arg_names[index] annotation = annotations.get(name, _empty) parameters.append(Parameter(name, annotation=annotation, kind=_VAR_KEYWORD)) return cls( parameters, return_annotation=annotations.get("return", _empty), __validate_parameters__=False, )
python
def from_function(cls, func): """Constructs Signature for the given python function""" if not isinstance(func, types.FunctionType): raise TypeError("{!r} is not a Python function".format(func)) Parameter = cls._parameter_cls # Parameter information. func_code = func.__code__ pos_count = func_code.co_argcount arg_names = func_code.co_varnames positional = tuple(arg_names[:pos_count]) keyword_only_count = getattr(func_code, "co_kwonlyargcount", 0) keyword_only = arg_names[pos_count : (pos_count + keyword_only_count)] annotations = getattr(func, "__annotations__", {}) defaults = func.__defaults__ kwdefaults = getattr(func, "__kwdefaults__", None) if defaults: pos_default_count = len(defaults) else: pos_default_count = 0 parameters = [] # Non-keyword-only parameters w/o defaults. non_default_count = pos_count - pos_default_count for name in positional[:non_default_count]: annotation = annotations.get(name, _empty) parameters.append( Parameter(name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD) ) # ... w/ defaults. for offset, name in enumerate(positional[non_default_count:]): annotation = annotations.get(name, _empty) parameters.append( Parameter( name, annotation=annotation, kind=_POSITIONAL_OR_KEYWORD, default=defaults[offset], ) ) # *args if func_code.co_flags & 0x04: name = arg_names[pos_count + keyword_only_count] annotation = annotations.get(name, _empty) parameters.append( Parameter(name, annotation=annotation, kind=_VAR_POSITIONAL) ) # Keyword-only parameters. for name in keyword_only: default = _empty if kwdefaults is not None: default = kwdefaults.get(name, _empty) annotation = annotations.get(name, _empty) parameters.append( Parameter( name, annotation=annotation, kind=_KEYWORD_ONLY, default=default ) ) # **kwargs if func_code.co_flags & 0x08: index = pos_count + keyword_only_count if func_code.co_flags & 0x04: index += 1 name = arg_names[index] annotation = annotations.get(name, _empty) parameters.append(Parameter(name, annotation=annotation, kind=_VAR_KEYWORD)) return cls( parameters, return_annotation=annotations.get("return", _empty), __validate_parameters__=False, )
[ "def", "from_function", "(", "cls", ",", "func", ")", ":", "if", "not", "isinstance", "(", "func", ",", "types", ".", "FunctionType", ")", ":", "raise", "TypeError", "(", "\"{!r} is not a Python function\"", ".", "format", "(", "func", ")", ")", "Parameter", "=", "cls", ".", "_parameter_cls", "# Parameter information.", "func_code", "=", "func", ".", "__code__", "pos_count", "=", "func_code", ".", "co_argcount", "arg_names", "=", "func_code", ".", "co_varnames", "positional", "=", "tuple", "(", "arg_names", "[", ":", "pos_count", "]", ")", "keyword_only_count", "=", "getattr", "(", "func_code", ",", "\"co_kwonlyargcount\"", ",", "0", ")", "keyword_only", "=", "arg_names", "[", "pos_count", ":", "(", "pos_count", "+", "keyword_only_count", ")", "]", "annotations", "=", "getattr", "(", "func", ",", "\"__annotations__\"", ",", "{", "}", ")", "defaults", "=", "func", ".", "__defaults__", "kwdefaults", "=", "getattr", "(", "func", ",", "\"__kwdefaults__\"", ",", "None", ")", "if", "defaults", ":", "pos_default_count", "=", "len", "(", "defaults", ")", "else", ":", "pos_default_count", "=", "0", "parameters", "=", "[", "]", "# Non-keyword-only parameters w/o defaults.", "non_default_count", "=", "pos_count", "-", "pos_default_count", "for", "name", "in", "positional", "[", ":", "non_default_count", "]", ":", "annotation", "=", "annotations", ".", "get", "(", "name", ",", "_empty", ")", "parameters", ".", "append", "(", "Parameter", "(", "name", ",", "annotation", "=", "annotation", ",", "kind", "=", "_POSITIONAL_OR_KEYWORD", ")", ")", "# ... w/ defaults.", "for", "offset", ",", "name", "in", "enumerate", "(", "positional", "[", "non_default_count", ":", "]", ")", ":", "annotation", "=", "annotations", ".", "get", "(", "name", ",", "_empty", ")", "parameters", ".", "append", "(", "Parameter", "(", "name", ",", "annotation", "=", "annotation", ",", "kind", "=", "_POSITIONAL_OR_KEYWORD", ",", "default", "=", "defaults", "[", "offset", "]", ",", ")", ")", "# *args", "if", "func_code", ".", "co_flags", "&", "0x04", ":", "name", "=", "arg_names", "[", "pos_count", "+", "keyword_only_count", "]", "annotation", "=", "annotations", ".", "get", "(", "name", ",", "_empty", ")", "parameters", ".", "append", "(", "Parameter", "(", "name", ",", "annotation", "=", "annotation", ",", "kind", "=", "_VAR_POSITIONAL", ")", ")", "# Keyword-only parameters.", "for", "name", "in", "keyword_only", ":", "default", "=", "_empty", "if", "kwdefaults", "is", "not", "None", ":", "default", "=", "kwdefaults", ".", "get", "(", "name", ",", "_empty", ")", "annotation", "=", "annotations", ".", "get", "(", "name", ",", "_empty", ")", "parameters", ".", "append", "(", "Parameter", "(", "name", ",", "annotation", "=", "annotation", ",", "kind", "=", "_KEYWORD_ONLY", ",", "default", "=", "default", ")", ")", "# **kwargs", "if", "func_code", ".", "co_flags", "&", "0x08", ":", "index", "=", "pos_count", "+", "keyword_only_count", "if", "func_code", ".", "co_flags", "&", "0x04", ":", "index", "+=", "1", "name", "=", "arg_names", "[", "index", "]", "annotation", "=", "annotations", ".", "get", "(", "name", ",", "_empty", ")", "parameters", ".", "append", "(", "Parameter", "(", "name", ",", "annotation", "=", "annotation", ",", "kind", "=", "_VAR_KEYWORD", ")", ")", "return", "cls", "(", "parameters", ",", "return_annotation", "=", "annotations", ".", "get", "(", "\"return\"", ",", "_empty", ")", ",", "__validate_parameters__", "=", "False", ",", ")" ]
Constructs Signature for the given python function
[ "Constructs", "Signature", "for", "the", "given", "python", "function" ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/pyutils/signature.py#L526-L606
train
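A brief demonstration using the vendored class; the sample function is illustrative:

from graphene.pyutils.signature import Signature

def greet(name, greeting='hello', *rest, **extra):
    return '{} {}'.format(greeting, name)

sig = Signature.from_function(greet)
print(list(sig.parameters))  # ['name', 'greeting', 'rest', 'extra']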
graphql-python/graphene
graphene/pyutils/signature.py
Signature.replace
def replace(self, parameters=_void, return_annotation=_void): """Creates a customized copy of the Signature. Pass 'parameters' and/or 'return_annotation' arguments to override them in the new copy. """ if parameters is _void: parameters = self.parameters.values() if return_annotation is _void: return_annotation = self._return_annotation return type(self)(parameters, return_annotation=return_annotation)
python
def replace(self, parameters=_void, return_annotation=_void): """Creates a customized copy of the Signature. Pass 'parameters' and/or 'return_annotation' arguments to override them in the new copy. """ if parameters is _void: parameters = self.parameters.values() if return_annotation is _void: return_annotation = self._return_annotation return type(self)(parameters, return_annotation=return_annotation)
[ "def", "replace", "(", "self", ",", "parameters", "=", "_void", ",", "return_annotation", "=", "_void", ")", ":", "if", "parameters", "is", "_void", ":", "parameters", "=", "self", ".", "parameters", ".", "values", "(", ")", "if", "return_annotation", "is", "_void", ":", "return_annotation", "=", "self", ".", "_return_annotation", "return", "type", "(", "self", ")", "(", "parameters", ",", "return_annotation", "=", "return_annotation", ")" ]
Creates a customized copy of the Signature. Pass 'parameters' and/or 'return_annotation' arguments to override them in the new copy.
[ "Creates", "a", "customized", "copy", "of", "the", "Signature", ".", "Pass", "parameters", "and", "/", "or", "return_annotation", "arguments", "to", "override", "them", "in", "the", "new", "copy", "." ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/pyutils/signature.py#L619-L631
train
graphql-python/graphene
graphene/pyutils/signature.py
Signature._bind
def _bind(self, args, kwargs, partial=False): """Private method. Don't use directly.""" arguments = OrderedDict() parameters = iter(self.parameters.values()) parameters_ex = () arg_vals = iter(args) if partial: # Support for binding arguments to 'functools.partial' objects. # See 'functools.partial' case in 'signature()' implementation # for details. for param_name, param in self.parameters.items(): if param._partial_kwarg and param_name not in kwargs: # Simulating 'functools.partial' behavior kwargs[param_name] = param.default while True: # Let's iterate through the positional arguments and corresponding # parameters try: arg_val = next(arg_vals) except StopIteration: # No more positional arguments try: param = next(parameters) except StopIteration: # No more parameters. That's it. Just need to check that # we have no `kwargs` after this while loop break else: if param.kind == _VAR_POSITIONAL: # That's OK, just empty *args. Let's start parsing # kwargs break elif param.name in kwargs: if param.kind == _POSITIONAL_ONLY: msg = ( "{arg!r} parameter is positional only, " "but was passed as a keyword" ) msg = msg.format(arg=param.name) raise TypeError(msg) parameters_ex = (param,) break elif param.kind == _VAR_KEYWORD or param.default is not _empty: # That's fine too - we have a default value for this # parameter. So, lets start parsing `kwargs`, starting # with the current parameter parameters_ex = (param,) break else: if partial: parameters_ex = (param,) break else: msg = "{arg!r} parameter lacking default value" msg = msg.format(arg=param.name) raise TypeError(msg) else: # We have a positional argument to process try: param = next(parameters) except StopIteration: raise TypeError("too many positional arguments") else: if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): # Looks like we have no parameter for this positional # argument raise TypeError("too many positional arguments") if param.kind == _VAR_POSITIONAL: # We have an '*args'-like argument, let's fill it with # all positional arguments we have left and move on to # the next phase values = [arg_val] values.extend(arg_vals) arguments[param.name] = tuple(values) break if param.name in kwargs: raise TypeError( "multiple values for argument " "{arg!r}".format(arg=param.name) ) arguments[param.name] = arg_val # Now, we iterate through the remaining parameters to process # keyword arguments kwargs_param = None for param in itertools.chain(parameters_ex, parameters): if param.kind == _POSITIONAL_ONLY: # This should never happen in case of a properly built # Signature object (but let's have this check here # to ensure correct behaviour just in case) raise TypeError( "{arg!r} parameter is positional only, " "but was passed as a keyword".format(arg=param.name) ) if param.kind == _VAR_KEYWORD: # Memorize that we have a '**kwargs'-like parameter kwargs_param = param continue param_name = param.name try: arg_val = kwargs.pop(param_name) except KeyError: # We have no value for this parameter. It's fine though, # if it has a default value, or it is an '*args'-like # parameter, left alone by the processing of positional # arguments. if ( not partial and param.kind != _VAR_POSITIONAL and param.default is _empty ): raise TypeError( "{arg!r} parameter lacking default value".format(arg=param_name) ) else: arguments[param_name] = arg_val if kwargs: if kwargs_param is not None: # Process our '**kwargs'-like parameter arguments[kwargs_param.name] = kwargs else: raise TypeError("too many keyword arguments") return self._bound_arguments_cls(self, arguments)
python
def _bind(self, args, kwargs, partial=False): """Private method. Don't use directly.""" arguments = OrderedDict() parameters = iter(self.parameters.values()) parameters_ex = () arg_vals = iter(args) if partial: # Support for binding arguments to 'functools.partial' objects. # See 'functools.partial' case in 'signature()' implementation # for details. for param_name, param in self.parameters.items(): if param._partial_kwarg and param_name not in kwargs: # Simulating 'functools.partial' behavior kwargs[param_name] = param.default while True: # Let's iterate through the positional arguments and corresponding # parameters try: arg_val = next(arg_vals) except StopIteration: # No more positional arguments try: param = next(parameters) except StopIteration: # No more parameters. That's it. Just need to check that # we have no `kwargs` after this while loop break else: if param.kind == _VAR_POSITIONAL: # That's OK, just empty *args. Let's start parsing # kwargs break elif param.name in kwargs: if param.kind == _POSITIONAL_ONLY: msg = ( "{arg!r} parameter is positional only, " "but was passed as a keyword" ) msg = msg.format(arg=param.name) raise TypeError(msg) parameters_ex = (param,) break elif param.kind == _VAR_KEYWORD or param.default is not _empty: # That's fine too - we have a default value for this # parameter. So, lets start parsing `kwargs`, starting # with the current parameter parameters_ex = (param,) break else: if partial: parameters_ex = (param,) break else: msg = "{arg!r} parameter lacking default value" msg = msg.format(arg=param.name) raise TypeError(msg) else: # We have a positional argument to process try: param = next(parameters) except StopIteration: raise TypeError("too many positional arguments") else: if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): # Looks like we have no parameter for this positional # argument raise TypeError("too many positional arguments") if param.kind == _VAR_POSITIONAL: # We have an '*args'-like argument, let's fill it with # all positional arguments we have left and move on to # the next phase values = [arg_val] values.extend(arg_vals) arguments[param.name] = tuple(values) break if param.name in kwargs: raise TypeError( "multiple values for argument " "{arg!r}".format(arg=param.name) ) arguments[param.name] = arg_val # Now, we iterate through the remaining parameters to process # keyword arguments kwargs_param = None for param in itertools.chain(parameters_ex, parameters): if param.kind == _POSITIONAL_ONLY: # This should never happen in case of a properly built # Signature object (but let's have this check here # to ensure correct behaviour just in case) raise TypeError( "{arg!r} parameter is positional only, " "but was passed as a keyword".format(arg=param.name) ) if param.kind == _VAR_KEYWORD: # Memorize that we have a '**kwargs'-like parameter kwargs_param = param continue param_name = param.name try: arg_val = kwargs.pop(param_name) except KeyError: # We have no value for this parameter. It's fine though, # if it has a default value, or it is an '*args'-like # parameter, left alone by the processing of positional # arguments. if ( not partial and param.kind != _VAR_POSITIONAL and param.default is _empty ): raise TypeError( "{arg!r} parameter lacking default value".format(arg=param_name) ) else: arguments[param_name] = arg_val if kwargs: if kwargs_param is not None: # Process our '**kwargs'-like parameter arguments[kwargs_param.name] = kwargs else: raise TypeError("too many keyword arguments") return self._bound_arguments_cls(self, arguments)
[ "def", "_bind", "(", "self", ",", "args", ",", "kwargs", ",", "partial", "=", "False", ")", ":", "arguments", "=", "OrderedDict", "(", ")", "parameters", "=", "iter", "(", "self", ".", "parameters", ".", "values", "(", ")", ")", "parameters_ex", "=", "(", ")", "arg_vals", "=", "iter", "(", "args", ")", "if", "partial", ":", "# Support for binding arguments to 'functools.partial' objects.", "# See 'functools.partial' case in 'signature()' implementation", "# for details.", "for", "param_name", ",", "param", "in", "self", ".", "parameters", ".", "items", "(", ")", ":", "if", "param", ".", "_partial_kwarg", "and", "param_name", "not", "in", "kwargs", ":", "# Simulating 'functools.partial' behavior", "kwargs", "[", "param_name", "]", "=", "param", ".", "default", "while", "True", ":", "# Let's iterate through the positional arguments and corresponding", "# parameters", "try", ":", "arg_val", "=", "next", "(", "arg_vals", ")", "except", "StopIteration", ":", "# No more positional arguments", "try", ":", "param", "=", "next", "(", "parameters", ")", "except", "StopIteration", ":", "# No more parameters. That's it. Just need to check that", "# we have no `kwargs` after this while loop", "break", "else", ":", "if", "param", ".", "kind", "==", "_VAR_POSITIONAL", ":", "# That's OK, just empty *args. Let's start parsing", "# kwargs", "break", "elif", "param", ".", "name", "in", "kwargs", ":", "if", "param", ".", "kind", "==", "_POSITIONAL_ONLY", ":", "msg", "=", "(", "\"{arg!r} parameter is positional only, \"", "\"but was passed as a keyword\"", ")", "msg", "=", "msg", ".", "format", "(", "arg", "=", "param", ".", "name", ")", "raise", "TypeError", "(", "msg", ")", "parameters_ex", "=", "(", "param", ",", ")", "break", "elif", "param", ".", "kind", "==", "_VAR_KEYWORD", "or", "param", ".", "default", "is", "not", "_empty", ":", "# That's fine too - we have a default value for this", "# parameter. 
So, lets start parsing `kwargs`, starting", "# with the current parameter", "parameters_ex", "=", "(", "param", ",", ")", "break", "else", ":", "if", "partial", ":", "parameters_ex", "=", "(", "param", ",", ")", "break", "else", ":", "msg", "=", "\"{arg!r} parameter lacking default value\"", "msg", "=", "msg", ".", "format", "(", "arg", "=", "param", ".", "name", ")", "raise", "TypeError", "(", "msg", ")", "else", ":", "# We have a positional argument to process", "try", ":", "param", "=", "next", "(", "parameters", ")", "except", "StopIteration", ":", "raise", "TypeError", "(", "\"too many positional arguments\"", ")", "else", ":", "if", "param", ".", "kind", "in", "(", "_VAR_KEYWORD", ",", "_KEYWORD_ONLY", ")", ":", "# Looks like we have no parameter for this positional", "# argument", "raise", "TypeError", "(", "\"too many positional arguments\"", ")", "if", "param", ".", "kind", "==", "_VAR_POSITIONAL", ":", "# We have an '*args'-like argument, let's fill it with", "# all positional arguments we have left and move on to", "# the next phase", "values", "=", "[", "arg_val", "]", "values", ".", "extend", "(", "arg_vals", ")", "arguments", "[", "param", ".", "name", "]", "=", "tuple", "(", "values", ")", "break", "if", "param", ".", "name", "in", "kwargs", ":", "raise", "TypeError", "(", "\"multiple values for argument \"", "\"{arg!r}\"", ".", "format", "(", "arg", "=", "param", ".", "name", ")", ")", "arguments", "[", "param", ".", "name", "]", "=", "arg_val", "# Now, we iterate through the remaining parameters to process", "# keyword arguments", "kwargs_param", "=", "None", "for", "param", "in", "itertools", ".", "chain", "(", "parameters_ex", ",", "parameters", ")", ":", "if", "param", ".", "kind", "==", "_POSITIONAL_ONLY", ":", "# This should never happen in case of a properly built", "# Signature object (but let's have this check here", "# to ensure correct behaviour just in case)", "raise", "TypeError", "(", "\"{arg!r} parameter is positional only, \"", "\"but was passed as a keyword\"", ".", "format", "(", "arg", "=", "param", ".", "name", ")", ")", "if", "param", ".", "kind", "==", "_VAR_KEYWORD", ":", "# Memorize that we have a '**kwargs'-like parameter", "kwargs_param", "=", "param", "continue", "param_name", "=", "param", ".", "name", "try", ":", "arg_val", "=", "kwargs", ".", "pop", "(", "param_name", ")", "except", "KeyError", ":", "# We have no value for this parameter. It's fine though,", "# if it has a default value, or it is an '*args'-like", "# parameter, left alone by the processing of positional", "# arguments.", "if", "(", "not", "partial", "and", "param", ".", "kind", "!=", "_VAR_POSITIONAL", "and", "param", ".", "default", "is", "_empty", ")", ":", "raise", "TypeError", "(", "\"{arg!r} parameter lacking default value\"", ".", "format", "(", "arg", "=", "param_name", ")", ")", "else", ":", "arguments", "[", "param_name", "]", "=", "arg_val", "if", "kwargs", ":", "if", "kwargs_param", "is", "not", "None", ":", "# Process our '**kwargs'-like parameter", "arguments", "[", "kwargs_param", ".", "name", "]", "=", "kwargs", "else", ":", "raise", "TypeError", "(", "\"too many keyword arguments\"", ")", "return", "self", ".", "_bound_arguments_cls", "(", "self", ",", "arguments", ")" ]
Private method. Don't use directly.
[ "Private", "method", ".", "Don", "t", "use", "directly", "." ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/pyutils/signature.py#L672-L806
train
graphql-python/graphene
graphene/pyutils/signature.py
Signature.bind_partial
def bind_partial(self, *args, **kwargs): """Get a BoundArguments object, that partially maps the passed `args` and `kwargs` to the function's signature. Raises `TypeError` if the passed arguments can not be bound. """ return self._bind(args, kwargs, partial=True)
python
def bind_partial(self, *args, **kwargs): """Get a BoundArguments object, that partially maps the passed `args` and `kwargs` to the function's signature. Raises `TypeError` if the passed arguments can not be bound. """ return self._bind(args, kwargs, partial=True)
[ "def", "bind_partial", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_bind", "(", "args", ",", "kwargs", ",", "partial", "=", "True", ")" ]
Get a BoundArguments object, that partially maps the passed `args` and `kwargs` to the function's signature. Raises `TypeError` if the passed arguments can not be bound.
[ "Get", "a", "BoundArguments", "object", "that", "partially", "maps", "the", "passed", "args", "and", "kwargs", "to", "the", "function", "s", "signature", ".", "Raises", "TypeError", "if", "the", "passed", "arguments", "can", "not", "be", "bound", "." ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/pyutils/signature.py#L815-L820
train
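A short sketch of partial binding, reusing from_function from earlier in this module:

from graphene.pyutils.signature import Signature

def f(a, b, c=3):
    return a + b + c

ba = Signature.from_function(f).bind_partial(1)
# Only 'a' is bound; 'b' and 'c' are left for a later call.
print(ba.arguments)  # OrderedDict([('a', 1)])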
graphql-python/graphene
graphene/relay/node.py
is_node
def is_node(objecttype): """ Check if the given objecttype has Node as an interface """ if not isclass(objecttype): return False if not issubclass(objecttype, ObjectType): return False for i in objecttype._meta.interfaces: if issubclass(i, Node): return True return False
python
def is_node(objecttype): """ Check if the given objecttype has Node as an interface """ if not isclass(objecttype): return False if not issubclass(objecttype, ObjectType): return False for i in objecttype._meta.interfaces: if issubclass(i, Node): return True return False
[ "def", "is_node", "(", "objecttype", ")", ":", "if", "not", "isclass", "(", "objecttype", ")", ":", "return", "False", "if", "not", "issubclass", "(", "objecttype", ",", "ObjectType", ")", ":", "return", "False", "for", "i", "in", "objecttype", ".", "_meta", ".", "interfaces", ":", "if", "issubclass", "(", "i", ",", "Node", ")", ":", "return", "True", "return", "False" ]
Check if the given objecttype has Node as an interface
[ "Check", "if", "the", "given", "objecttype", "has", "Node", "as", "an", "interface" ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/relay/node.py#L12-L26
train
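A tiny example; the Ship type is illustrative:

import graphene
from graphene.relay import Node
from graphene.relay.node import is_node

class Ship(graphene.ObjectType):
    class Meta:
        interfaces = (Node,)
    name = graphene.String()

print(is_node(Ship))             # True: ObjectType implementing Node
print(is_node(graphene.String))  # False: not an ObjectType subclass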
graphql-python/graphene
graphene/pyutils/version.py
get_complete_version
def get_complete_version(version=None): """Returns a tuple of the graphene version. If version argument is non-empty, then checks for correctness of the tuple provided. """ if version is None: from graphene import VERSION as version else: assert len(version) == 5 assert version[3] in ("alpha", "beta", "rc", "final") return version
python
def get_complete_version(version=None): """Returns a tuple of the graphene version. If version argument is non-empty, then checks for correctness of the tuple provided. """ if version is None: from graphene import VERSION as version else: assert len(version) == 5 assert version[3] in ("alpha", "beta", "rc", "final") return version
[ "def", "get_complete_version", "(", "version", "=", "None", ")", ":", "if", "version", "is", "None", ":", "from", "graphene", "import", "VERSION", "as", "version", "else", ":", "assert", "len", "(", "version", ")", "==", "5", "assert", "version", "[", "3", "]", "in", "(", "\"alpha\"", ",", "\"beta\"", ",", "\"rc\"", ",", "\"final\"", ")", "return", "version" ]
Returns a tuple of the graphene version. If version argument is non-empty, then checks for correctness of the tuple provided.
[ "Returns", "a", "tuple", "of", "the", "graphene", "version", ".", "If", "version", "argument", "is", "non", "-", "empty", "then", "checks", "for", "correctness", "of", "the", "tuple", "provided", "." ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/pyutils/version.py#L40-L50
train
graphql-python/graphene
graphene/utils/thenables.py
maybe_thenable
def maybe_thenable(obj, on_resolve): """ Execute an on_resolve function once the thenable is resolved, returning the same type of object inputted. If the object is not thenable, it should return on_resolve(obj) """ if isawaitable(obj) and not isinstance(obj, Promise): return await_and_execute(obj, on_resolve) if is_thenable(obj): return Promise.resolve(obj).then(on_resolve) # If it's not awaitable nor a Promise, return # the function executed over the object return on_resolve(obj)
python
def maybe_thenable(obj, on_resolve): """ Execute an on_resolve function once the thenable is resolved, returning the same type of object inputted. If the object is not thenable, it should return on_resolve(obj) """ if isawaitable(obj) and not isinstance(obj, Promise): return await_and_execute(obj, on_resolve) if is_thenable(obj): return Promise.resolve(obj).then(on_resolve) # If it's not awaitable nor a Promise, return # the function executed over the object return on_resolve(obj)
[ "def", "maybe_thenable", "(", "obj", ",", "on_resolve", ")", ":", "if", "isawaitable", "(", "obj", ")", "and", "not", "isinstance", "(", "obj", ",", "Promise", ")", ":", "return", "await_and_execute", "(", "obj", ",", "on_resolve", ")", "if", "is_thenable", "(", "obj", ")", ":", "return", "Promise", ".", "resolve", "(", "obj", ")", ".", "then", "(", "on_resolve", ")", "# If it's not awaitable nor a Promise, return", "# the function executed over the object", "return", "on_resolve", "(", "obj", ")" ]
Execute an on_resolve function once the thenable is resolved, returning the same type of object inputted. If the object is not thenable, it should return on_resolve(obj)
[ "Execute", "an", "on_resolve", "function", "once", "the", "thenable", "is", "resolved", "returning", "the", "same", "type", "of", "object", "inputted", ".", "If", "the", "object", "is", "not", "thenable", "it", "should", "return", "on_resolve", "(", "obj", ")" ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/utils/thenables.py#L28-L42
train
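Two quick illustrations, one synchronous and one Promise-based, assuming the promise library graphene already depends on:

from promise import Promise
from graphene.utils.thenables import maybe_thenable

# A plain value is passed straight through on_resolve.
print(maybe_thenable(21, lambda v: v * 2))  # 42

# A Promise comes back as a Promise whose resolved value is transformed.
p = maybe_thenable(Promise.resolve(21), lambda v: v * 2)
print(p.get())  # 42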
graphql-python/graphene
graphene/types/utils.py
get_field_as
def get_field_as(value, _as=None): """ Get type mounted """ if isinstance(value, MountedType): return value elif isinstance(value, UnmountedType): if _as is None: return value return _as.mounted(value)
python
def get_field_as(value, _as=None): """ Get type mounted """ if isinstance(value, MountedType): return value elif isinstance(value, UnmountedType): if _as is None: return value return _as.mounted(value)
[ "def", "get_field_as", "(", "value", ",", "_as", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "MountedType", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "UnmountedType", ")", ":", "if", "_as", "is", "None", ":", "return", "value", "return", "_as", ".", "mounted", "(", "value", ")" ]
Get type mounted
[ "Get", "type", "mounted" ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/types/utils.py#L12-L21
train
graphql-python/graphene
graphene/types/utils.py
yank_fields_from_attrs
def yank_fields_from_attrs(attrs, _as=None, sort=True): """ Extract all the fields in given attributes (dict) and return them ordered """ fields_with_names = [] for attname, value in list(attrs.items()): field = get_field_as(value, _as) if not field: continue fields_with_names.append((attname, field)) if sort: fields_with_names = sorted(fields_with_names, key=lambda f: f[1]) return OrderedDict(fields_with_names)
python
def yank_fields_from_attrs(attrs, _as=None, sort=True): """ Extract all the fields in given attributes (dict) and return them ordered """ fields_with_names = [] for attname, value in list(attrs.items()): field = get_field_as(value, _as) if not field: continue fields_with_names.append((attname, field)) if sort: fields_with_names = sorted(fields_with_names, key=lambda f: f[1]) return OrderedDict(fields_with_names)
[ "def", "yank_fields_from_attrs", "(", "attrs", ",", "_as", "=", "None", ",", "sort", "=", "True", ")", ":", "fields_with_names", "=", "[", "]", "for", "attname", ",", "value", "in", "list", "(", "attrs", ".", "items", "(", ")", ")", ":", "field", "=", "get_field_as", "(", "value", ",", "_as", ")", "if", "not", "field", ":", "continue", "fields_with_names", ".", "append", "(", "(", "attname", ",", "field", ")", ")", "if", "sort", ":", "fields_with_names", "=", "sorted", "(", "fields_with_names", ",", "key", "=", "lambda", "f", ":", "f", "[", "1", "]", ")", "return", "OrderedDict", "(", "fields_with_names", ")" ]
Extract all the fields in given attributes (dict) and return them ordered
[ "Extract", "all", "the", "fields", "in", "given", "attributes", "(", "dict", ")", "and", "return", "them", "ordered" ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/types/utils.py#L24-L38
train
graphql-python/graphene
graphene/utils/module_loading.py
import_string
def import_string(dotted_path, dotted_attributes=None): """ Import a dotted module path and return the attribute/class designated by the last name in the path. When a dotted attribute path is also provided, the dotted attribute path would be applied to the attribute/class retrieved from the first step, and return the corresponding value designated by the attribute path. Raise ImportError if the import failed. """ try: module_path, class_name = dotted_path.rsplit(".", 1) except ValueError: raise ImportError("%s doesn't look like a module path" % dotted_path) module = import_module(module_path) try: result = getattr(module, class_name) except AttributeError: raise ImportError( 'Module "%s" does not define a "%s" attribute/class' % (module_path, class_name) ) if not dotted_attributes: return result else: attributes = dotted_attributes.split(".") traveled_attributes = [] try: for attribute in attributes: traveled_attributes.append(attribute) result = getattr(result, attribute) return result except AttributeError: raise ImportError( 'Module "%s" does not define a "%s" attribute inside attribute/class "%s"' % (module_path, ".".join(traveled_attributes), class_name) )
python
def import_string(dotted_path, dotted_attributes=None): """ Import a dotted module path and return the attribute/class designated by the last name in the path. When a dotted attribute path is also provided, the dotted attribute path would be applied to the attribute/class retrieved from the first step, and return the corresponding value designated by the attribute path. Raise ImportError if the import failed. """ try: module_path, class_name = dotted_path.rsplit(".", 1) except ValueError: raise ImportError("%s doesn't look like a module path" % dotted_path) module = import_module(module_path) try: result = getattr(module, class_name) except AttributeError: raise ImportError( 'Module "%s" does not define a "%s" attribute/class' % (module_path, class_name) ) if not dotted_attributes: return result else: attributes = dotted_attributes.split(".") traveled_attributes = [] try: for attribute in attributes: traveled_attributes.append(attribute) result = getattr(result, attribute) return result except AttributeError: raise ImportError( 'Module "%s" does not define a "%s" attribute inside attribute/class "%s"' % (module_path, ".".join(traveled_attributes), class_name) )
[ "def", "import_string", "(", "dotted_path", ",", "dotted_attributes", "=", "None", ")", ":", "try", ":", "module_path", ",", "class_name", "=", "dotted_path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "except", "ValueError", ":", "raise", "ImportError", "(", "\"%s doesn't look like a module path\"", "%", "dotted_path", ")", "module", "=", "import_module", "(", "module_path", ")", "try", ":", "result", "=", "getattr", "(", "module", ",", "class_name", ")", "except", "AttributeError", ":", "raise", "ImportError", "(", "'Module \"%s\" does not define a \"%s\" attribute/class'", "%", "(", "module_path", ",", "class_name", ")", ")", "if", "not", "dotted_attributes", ":", "return", "result", "else", ":", "attributes", "=", "dotted_attributes", ".", "split", "(", "\".\"", ")", "traveled_attributes", "=", "[", "]", "try", ":", "for", "attribute", "in", "attributes", ":", "traveled_attributes", ".", "append", "(", "attribute", ")", "result", "=", "getattr", "(", "result", ",", "attribute", ")", "return", "result", "except", "AttributeError", ":", "raise", "ImportError", "(", "'Module \"%s\" does not define a \"%s\" attribute inside attribute/class \"%s\"'", "%", "(", "module_path", ",", "\".\"", ".", "join", "(", "traveled_attributes", ")", ",", "class_name", ")", ")" ]
Import a dotted module path and return the attribute/class designated by the last name in the path. When a dotted attribute path is also provided, the dotted attribute path would be applied to the attribute/class retrieved from the first step, and return the corresponding value designated by the attribute path. Raise ImportError if the import failed.
[ "Import", "a", "dotted", "module", "path", "and", "return", "the", "attribute", "/", "class", "designated", "by", "the", "last", "name", "in", "the", "path", ".", "When", "a", "dotted", "attribute", "path", "is", "also", "provided", "the", "dotted", "attribute", "path", "would", "be", "applied", "to", "the", "attribute", "/", "class", "retrieved", "from", "the", "first", "step", "and", "return", "the", "corresponding", "value", "designated", "by", "the", "attribute", "path", ".", "Raise", "ImportError", "if", "the", "import", "failed", "." ]
abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6
https://github.com/graphql-python/graphene/blob/abff3d75a39bc8f2d1fdb48aafa1866cf47dfff6/graphene/utils/module_loading.py#L5-L42
train
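Usage sketch covering both forms of the call:

from graphene.utils.module_loading import import_string

# Plain dotted path: the last segment is the attribute/class to fetch.
OrderedDict = import_string('collections.OrderedDict')

# With a dotted attribute path applied to the first result.
sep = import_string('os.path', 'sep')
print(OrderedDict, sep)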
elastic/elasticsearch-dsl-py
examples/composite_agg.py
scan_aggs
def scan_aggs(search, source_aggs, inner_aggs={}, size=10): """ Helper function used to iterate over all possible bucket combinations of ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the ``composite`` aggregation under the hood to perform this. """ def run_search(**kwargs): s = search[:0] s.aggs.bucket('comp', 'composite', sources=source_aggs, size=size, **kwargs) for agg_name, agg in inner_aggs.items(): s.aggs['comp'][agg_name] = agg return s.execute() response = run_search() while response.aggregations.comp.buckets: for b in response.aggregations.comp.buckets: yield b if 'after_key' in response.aggregations.comp: after = response.aggregations.comp.after_key else: after = response.aggregations.comp.buckets[-1].key response = run_search(after=after)
python
def scan_aggs(search, source_aggs, inner_aggs={}, size=10): """ Helper function used to iterate over all possible bucket combinations of ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the ``composite`` aggregation under the hood to perform this. """ def run_search(**kwargs): s = search[:0] s.aggs.bucket('comp', 'composite', sources=source_aggs, size=size, **kwargs) for agg_name, agg in inner_aggs.items(): s.aggs['comp'][agg_name] = agg return s.execute() response = run_search() while response.aggregations.comp.buckets: for b in response.aggregations.comp.buckets: yield b if 'after_key' in response.aggregations.comp: after = response.aggregations.comp.after_key else: after = response.aggregations.comp.buckets[-1].key response = run_search(after=after)
[ "def", "scan_aggs", "(", "search", ",", "source_aggs", ",", "inner_aggs", "=", "{", "}", ",", "size", "=", "10", ")", ":", "def", "run_search", "(", "*", "*", "kwargs", ")", ":", "s", "=", "search", "[", ":", "0", "]", "s", ".", "aggs", ".", "bucket", "(", "'comp'", ",", "'composite'", ",", "sources", "=", "source_aggs", ",", "size", "=", "size", ",", "*", "*", "kwargs", ")", "for", "agg_name", ",", "agg", "in", "inner_aggs", ".", "items", "(", ")", ":", "s", ".", "aggs", "[", "'comp'", "]", "[", "agg_name", "]", "=", "agg", "return", "s", ".", "execute", "(", ")", "response", "=", "run_search", "(", ")", "while", "response", ".", "aggregations", ".", "comp", ".", "buckets", ":", "for", "b", "in", "response", ".", "aggregations", ".", "comp", ".", "buckets", ":", "yield", "b", "if", "'after_key'", "in", "response", ".", "aggregations", ".", "comp", ":", "after", "=", "response", ".", "aggregations", ".", "comp", ".", "after_key", "else", ":", "after", "=", "response", ".", "aggregations", ".", "comp", ".", "buckets", "[", "-", "1", "]", ".", "key", "response", "=", "run_search", "(", "after", "=", "after", ")" ]
Helper function used to iterate over all possible bucket combinations of ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the ``composite`` aggregation under the hood to perform this.
[ "Helper", "function", "used", "to", "iterate", "over", "all", "possible", "bucket", "combinations", "of", "source_aggs", "returning", "results", "of", "inner_aggs", "for", "each", ".", "Uses", "the", "composite", "aggregation", "under", "the", "hood", "to", "perform", "this", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/examples/composite_agg.py#L5-L26
train
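A hedged usage sketch with scan_aggs defined as above; the connection, index name, and field names are assumptions for illustration:

from elasticsearch_dsl import A, Search, connections

connections.create_connection(hosts=['localhost'])  # assumed local cluster

s = Search(index='git')  # hypothetical index
# One composite source per grouping key, plus an inner metric per bucket.
for b in scan_aggs(
    s,
    [{'committer': A('terms', field='committer.name.keyword')}],
    {'lines': A('sum', field='stats.lines')},
    size=100,
):
    print(b.key.committer, b.lines.value)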
elastic/elasticsearch-dsl-py
examples/completion.py
Person.clean
def clean(self): """ Automatically construct the suggestion input and weight by taking all possible permutations of Person's name as ``input`` and taking their popularity as ``weight``. """ self.suggest = { 'input': [' '.join(p) for p in permutations(self.name.split())], 'weight': self.popularity }
python
def clean(self): """ Automatically construct the suggestion input and weight by taking all possible permutations of Person's name as ``input`` and taking their popularity as ``weight``. """ self.suggest = { 'input': [' '.join(p) for p in permutations(self.name.split())], 'weight': self.popularity }
[ "def", "clean", "(", "self", ")", ":", "self", ".", "suggest", "=", "{", "'input'", ":", "[", "' '", ".", "join", "(", "p", ")", "for", "p", "in", "permutations", "(", "self", ".", "name", ".", "split", "(", ")", ")", "]", ",", "'weight'", ":", "self", ".", "popularity", "}" ]
Automatically construct the suggestion input and weight by taking all possible permutations of Person's name as ``input`` and taking their popularity as ``weight``.
[ "Automatically", "construct", "the", "suggestion", "input", "and", "weight", "by", "taking", "all", "possible", "permutations", "of", "Person", "s", "name", "as", "input", "and", "taking", "their", "popularity", "as", "weight", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/examples/completion.py#L38-L47
train
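A worked illustration of the permutation step in isolation:

from itertools import permutations

name = 'John Fitzgerald Kennedy'
inputs = [' '.join(p) for p in permutations(name.split())]
print(len(inputs))  # 6: every ordering of the three name tokens
print(inputs[0])    # John Fitzgerald Kennedy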
elastic/elasticsearch-dsl-py
elasticsearch_dsl/utils.py
ObjectBase.__list_fields
def __list_fields(cls): """ Get all the fields defined for our class; if we have an Index, try looking at the index mappings as well; mark the fields from Index as optional. """ for name in cls._doc_type.mapping: field = cls._doc_type.mapping[name] yield name, field, False if hasattr(cls.__class__, '_index'): if not cls._index._mapping: return for name in cls._index._mapping: # don't return fields that are in _doc_type if name in cls._doc_type.mapping: continue field = cls._index._mapping[name] yield name, field, True
python
def __list_fields(cls): """ Get all the fields defined for our class; if we have an Index, try looking at the index mappings as well; mark the fields from Index as optional. """ for name in cls._doc_type.mapping: field = cls._doc_type.mapping[name] yield name, field, False if hasattr(cls.__class__, '_index'): if not cls._index._mapping: return for name in cls._index._mapping: # don't return fields that are in _doc_type if name in cls._doc_type.mapping: continue field = cls._index._mapping[name] yield name, field, True
[ "def", "__list_fields", "(", "cls", ")", ":", "for", "name", "in", "cls", ".", "_doc_type", ".", "mapping", ":", "field", "=", "cls", ".", "_doc_type", ".", "mapping", "[", "name", "]", "yield", "name", ",", "field", ",", "False", "if", "hasattr", "(", "cls", ".", "__class__", ",", "'_index'", ")", ":", "if", "not", "cls", ".", "_index", ".", "_mapping", ":", "return", "for", "name", "in", "cls", ".", "_index", ".", "_mapping", ":", "# don't return fields that are in _doc_type", "if", "name", "in", "cls", ".", "_doc_type", ".", "mapping", ":", "continue", "field", "=", "cls", ".", "_index", ".", "_mapping", "[", "name", "]", "yield", "name", ",", "field", ",", "True" ]
Get all the fields defined for our class; if we have an Index, try looking at the index mappings as well; mark the fields from Index as optional.
[ "Get", "all", "the", "fields", "defined", "for", "our", "class", "if", "we", "have", "an", "Index", "try", "looking", "at", "the", "index", "mappings", "as", "well", "mark", "the", "fields", "from", "Index", "as", "optional", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/utils.py#L376-L394
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/update_by_query.py
UpdateByQuery._clone
def _clone(self): """ Return a clone of the current search request. Performs a shallow copy of all the underlying objects. Used internally by most state modifying APIs. """ ubq = super(UpdateByQuery, self)._clone() ubq._response_class = self._response_class ubq._script = self._script.copy() ubq.query._proxied = self.query._proxied return ubq
python
def _clone(self): """ Return a clone of the current search request. Performs a shallow copy of all the underlying objects. Used internally by most state modifying APIs. """ ubq = super(UpdateByQuery, self)._clone() ubq._response_class = self._response_class ubq._script = self._script.copy() ubq.query._proxied = self.query._proxied return ubq
[ "def", "_clone", "(", "self", ")", ":", "ubq", "=", "super", "(", "UpdateByQuery", ",", "self", ")", ".", "_clone", "(", ")", "ubq", ".", "_response_class", "=", "self", ".", "_response_class", "ubq", ".", "_script", "=", "self", ".", "_script", ".", "copy", "(", ")", "ubq", ".", "query", ".", "_proxied", "=", "self", ".", "query", ".", "_proxied", "return", "ubq" ]
Return a clone of the current search request. Performs a shallow copy of all the underlying objects. Used internally by most state modifying APIs.
[ "Return", "a", "clone", "of", "the", "current", "search", "request", ".", "Performs", "a", "shallow", "copy", "of", "all", "the", "underlying", "objects", ".", "Used", "internally", "by", "most", "state", "modifying", "APIs", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/update_by_query.py#L57-L68
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/update_by_query.py
UpdateByQuery.response_class
def response_class(self, cls): """ Override the default wrapper used for the response. """ ubq = self._clone() ubq._response_class = cls return ubq
python
def response_class(self, cls): """ Override the default wrapper used for the response. """ ubq = self._clone() ubq._response_class = cls return ubq
[ "def", "response_class", "(", "self", ",", "cls", ")", ":", "ubq", "=", "self", ".", "_clone", "(", ")", "ubq", ".", "_response_class", "=", "cls", "return", "ubq" ]
Override the default wrapper used for the response.
[ "Override", "the", "default", "wrapper", "used", "for", "the", "response", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/update_by_query.py#L70-L76
train
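One way response_class might be used, sketched under the assumption that UpdateByQueryResponse (the default wrapper in this version) is importable from elasticsearch_dsl.response; VerboseResponse is hypothetical.

from elasticsearch_dsl import UpdateByQuery
from elasticsearch_dsl.response import UpdateByQueryResponse  # assumed import path

class VerboseResponse(UpdateByQueryResponse):  # hypothetical wrapper
    @property
    def summary(self):
        return 'updated %d of %d docs' % (self.updated, self.total)

ubq = UpdateByQuery(index='blog').response_class(VerboseResponse)
# ubq.execute() would now return a VerboseResponse instance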
elastic/elasticsearch-dsl-py
elasticsearch_dsl/update_by_query.py
UpdateByQuery.update_from_dict
def update_from_dict(self, d): """ Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``. """ d = d.copy() if 'query' in d: self.query._proxied = Q(d.pop('query')) if 'script' in d: self._script = d.pop('script') self._extra = d return self
python
def update_from_dict(self, d): """ Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``. """ d = d.copy() if 'query' in d: self.query._proxied = Q(d.pop('query')) if 'script' in d: self._script = d.pop('script') self._extra = d return self
[ "def", "update_from_dict", "(", "self", ",", "d", ")", ":", "d", "=", "d", ".", "copy", "(", ")", "if", "'query'", "in", "d", ":", "self", ".", "query", ".", "_proxied", "=", "Q", "(", "d", ".", "pop", "(", "'query'", ")", ")", "if", "'script'", "in", "d", ":", "self", ".", "_script", "=", "d", ".", "pop", "(", "'script'", ")", "self", ".", "_extra", "=", "d", "return", "self" ]
Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``.
[ "Apply", "options", "from", "a", "serialized", "body", "to", "the", "current", "instance", ".", "Modifies", "the", "object", "in", "-", "place", ".", "Used", "mostly", "by", "from_dict", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/update_by_query.py#L78-L89
train
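A short round-trip sketch via the from_dict classmethod that this method backs; the query and script bodies are illustrative only.

from elasticsearch_dsl import UpdateByQuery

ubq = UpdateByQuery.from_dict({
    'query': {'term': {'published': False}},
    'script': {'source': 'ctx._source.views = 0'},
})
# update_from_dict stored the script and proxied the query,
# so the body serializes straight back out:
assert ubq.to_dict()['script'] == {'source': 'ctx._source.views = 0'}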
elastic/elasticsearch-dsl-py
elasticsearch_dsl/update_by_query.py
UpdateByQuery.script
def script(self, **kwargs): """ Define update action to take: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting-using.html for more details. Note: the API only accepts a single script, so calling the script multiple times will overwrite. Example:: ubq = UpdateByQuery() ubq = ubq.script(source="ctx._source.likes++") ubq = ubq.script(source="ctx._source.likes += params.f", lang="expression", params={'f': 3}) """ ubq = self._clone() if ubq._script: ubq._script = {} ubq._script.update(kwargs) return ubq
python
def script(self, **kwargs): """ Define update action to take: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting-using.html for more details. Note: the API only accepts a single script, so calling the script multiple times will overwrite. Example:: ubq = UpdateByQuery() ubq = ubq.script(source="ctx._source.likes++") ubq = ubq.script(source="ctx._source.likes += params.f", lang="expression", params={'f': 3}) """ ubq = self._clone() if ubq._script: ubq._script = {} ubq._script.update(kwargs) return ubq
[ "def", "script", "(", "self", ",", "*", "*", "kwargs", ")", ":", "ubq", "=", "self", ".", "_clone", "(", ")", "if", "ubq", ".", "_script", ":", "ubq", ".", "_script", "=", "{", "}", "ubq", ".", "_script", ".", "update", "(", "kwargs", ")", "return", "ubq" ]
Define update action to take: https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting-using.html for more details. Note: the API only accepts a single script, so calling the script multiple times will overwrite. Example:: ubq = UpdateByQuery() ubq = ubq.script(source="ctx._source.likes++") ubq = ubq.script(source="ctx._source.likes += params.f", lang="expression", params={'f': 3})
[ "Define", "update", "action", "to", "take", ":", "https", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "modules", "-", "scripting", "-", "using", ".", "html", "for", "more", "details", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/update_by_query.py#L91-L111
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/update_by_query.py
UpdateByQuery.to_dict
def to_dict(self, **kwargs): """ Serialize the search into the dictionary that will be sent over as the request's body. All additional keyword arguments will be included in the dictionary. """ d = {} if self.query: d["query"] = self.query.to_dict() if self._script: d['script'] = self._script d.update(self._extra) d.update(kwargs) return d
python
def to_dict(self, **kwargs): """ Serialize the search into the dictionary that will be sent over as the request's body. All additional keyword arguments will be included in the dictionary. """ d = {} if self.query: d["query"] = self.query.to_dict() if self._script: d['script'] = self._script d.update(self._extra) d.update(kwargs) return d
[ "def", "to_dict", "(", "self", ",", "*", "*", "kwargs", ")", ":", "d", "=", "{", "}", "if", "self", ".", "query", ":", "d", "[", "\"query\"", "]", "=", "self", ".", "query", ".", "to_dict", "(", ")", "if", "self", ".", "_script", ":", "d", "[", "'script'", "]", "=", "self", ".", "_script", "d", ".", "update", "(", "self", ".", "_extra", ")", "d", ".", "update", "(", "kwargs", ")", "return", "d" ]
Serialize the search into the dictionary that will be sent over as the request's body. All additional keyword arguments will be included in the dictionary.
[ "Serialize", "the", "search", "into", "the", "dictionary", "that", "will", "be", "sent", "over", "as", "the", "request", "s", "body", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/update_by_query.py#L113-L130
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/update_by_query.py
UpdateByQuery.execute
def execute(self): """ Execute the search and return an instance of ``Response`` wrapping all the data. """ es = connections.get_connection(self._using) self._response = self._response_class( self, es.update_by_query( index=self._index, body=self.to_dict(), **self._params ) ) return self._response
python
def execute(self): """ Execute the search and return an instance of ``Response`` wrapping all the data. """ es = connections.get_connection(self._using) self._response = self._response_class( self, es.update_by_query( index=self._index, body=self.to_dict(), **self._params ) ) return self._response
[ "def", "execute", "(", "self", ")", ":", "es", "=", "connections", ".", "get_connection", "(", "self", ".", "_using", ")", "self", ".", "_response", "=", "self", ".", "_response_class", "(", "self", ",", "es", ".", "update_by_query", "(", "index", "=", "self", ".", "_index", ",", "body", "=", "self", ".", "to_dict", "(", ")", ",", "*", "*", "self", ".", "_params", ")", ")", "return", "self", ".", "_response" ]
Execute the search and return an instance of ``Response`` wrapping all the data.
[ "Execute", "the", "search", "and", "return", "an", "instance", "of", "Response", "wrapping", "all", "the", "data", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/update_by_query.py#L132-L147
train
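Putting query, script and execute together; a sketch assuming a default connection and a hypothetical 'blog' index.

from elasticsearch_dsl import UpdateByQuery, connections

connections.create_connection(hosts=['localhost'])  # assumed cluster

ubq = (
    UpdateByQuery(index='blog')            # hypothetical index
    .query('match', title='python')
    .script(source='ctx._source.likes++')
)
response = ubq.execute()
print(response.success(), response.updated)  # fields exposed from the raw response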
elastic/elasticsearch-dsl-py
elasticsearch_dsl/mapping.py
Properties._collect_fields
def _collect_fields(self): """ Iterate over all Field objects within, including multi fields. """ for f in itervalues(self.properties.to_dict()): yield f # multi fields if hasattr(f, 'fields'): for inner_f in itervalues(f.fields.to_dict()): yield inner_f # nested and inner objects if hasattr(f, '_collect_fields'): for inner_f in f._collect_fields(): yield inner_f
python
def _collect_fields(self): """ Iterate over all Field objects within, including multi fields. """ for f in itervalues(self.properties.to_dict()): yield f # multi fields if hasattr(f, 'fields'): for inner_f in itervalues(f.fields.to_dict()): yield inner_f # nested and inner objects if hasattr(f, '_collect_fields'): for inner_f in f._collect_fields(): yield inner_f
[ "def", "_collect_fields", "(", "self", ")", ":", "for", "f", "in", "itervalues", "(", "self", ".", "properties", ".", "to_dict", "(", ")", ")", ":", "yield", "f", "# multi fields", "if", "hasattr", "(", "f", ",", "'fields'", ")", ":", "for", "inner_f", "in", "itervalues", "(", "f", ".", "fields", ".", "to_dict", "(", ")", ")", ":", "yield", "inner_f", "# nested and inner objects", "if", "hasattr", "(", "f", ",", "'_collect_fields'", ")", ":", "for", "inner_f", "in", "f", ".", "_collect_fields", "(", ")", ":", "yield", "inner_f" ]
Iterate over all Field objects within, including multi fields.
[ "Iterate", "over", "all", "Field", "objects", "within", "including", "multi", "fields", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/mapping.py#L40-L51
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.clone
def clone(self, name=None, using=None): """ Create a copy of the instance with another name or connection alias. Useful for creating multiple indices with shared configuration:: i = Index('base-index') i.settings(number_of_shards=1) i.create() i2 = i.clone('other-index') i2.create() :arg name: name of the index :arg using: connection alias to use, defaults to ``'default'`` """ i = Index(name or self._name, using=using or self._using) i._settings = self._settings.copy() i._aliases = self._aliases.copy() i._analysis = self._analysis.copy() i._doc_types = self._doc_types[:] if self._mapping is not None: i._mapping = self._mapping._clone() return i
python
def clone(self, name=None, using=None): """ Create a copy of the instance with another name or connection alias. Useful for creating multiple indices with shared configuration:: i = Index('base-index') i.settings(number_of_shards=1) i.create() i2 = i.clone('other-index') i2.create() :arg name: name of the index :arg using: connection alias to use, defaults to ``'default'`` """ i = Index(name or self._name, using=using or self._using) i._settings = self._settings.copy() i._aliases = self._aliases.copy() i._analysis = self._analysis.copy() i._doc_types = self._doc_types[:] if self._mapping is not None: i._mapping = self._mapping._clone() return i
[ "def", "clone", "(", "self", ",", "name", "=", "None", ",", "using", "=", "None", ")", ":", "i", "=", "Index", "(", "name", "or", "self", ".", "_name", ",", "using", "=", "using", "or", "self", ".", "_using", ")", "i", ".", "_settings", "=", "self", ".", "_settings", ".", "copy", "(", ")", "i", ".", "_aliases", "=", "self", ".", "_aliases", ".", "copy", "(", ")", "i", ".", "_analysis", "=", "self", ".", "_analysis", ".", "copy", "(", ")", "i", ".", "_doc_types", "=", "self", ".", "_doc_types", "[", ":", "]", "if", "self", ".", "_mapping", "is", "not", "None", ":", "i", ".", "_mapping", "=", "self", ".", "_mapping", ".", "_clone", "(", ")", "return", "i" ]
Create a copy of the instance with another name or connection alias. Useful for creating multiple indices with shared configuration:: i = Index('base-index') i.settings(number_of_shards=1) i.create() i2 = i.clone('other-index') i2.create() :arg name: name of the index :arg using: connection alias to use, defaults to ``'default'``
[ "Create", "a", "copy", "of", "the", "instance", "with", "another", "name", "or", "connection", "alias", ".", "Useful", "for", "creating", "multiple", "indices", "with", "shared", "configuration", "::" ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L81-L103
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.document
def document(self, document): """ Associate a :class:`~elasticsearch_dsl.Document` subclass with an index. This means that, when this index is created, it will contain the mappings for the ``Document``. If the ``Document`` class doesn't have a default index yet (by defining ``class Index``), this instance will be used. Can be used as a decorator:: i = Index('blog') @i.document class Post(Document): title = Text() # create the index, including Post mappings i.create() # .search() will now return a Search object that will return # properly deserialized Post instances s = i.search() """ self._doc_types.append(document) # If the document index does not have any name, that means the user # did not set any index already to the document. # So set this index as document index if document._index._name is None: document._index = self return document
python
def document(self, document): """ Associate a :class:`~elasticsearch_dsl.Document` subclass with an index. This means that, when this index is created, it will contain the mappings for the ``Document``. If the ``Document`` class doesn't have a default index yet (by defining ``class Index``), this instance will be used. Can be used as a decorator:: i = Index('blog') @i.document class Post(Document): title = Text() # create the index, including Post mappings i.create() # .search() will now return a Search object that will return # properly deserialized Post instances s = i.search() """ self._doc_types.append(document) # If the document index does not have any name, that means the user # did not set any index already to the document. # So set this index as document index if document._index._name is None: document._index = self return document
[ "def", "document", "(", "self", ",", "document", ")", ":", "self", ".", "_doc_types", ".", "append", "(", "document", ")", "# If the document index does not have any name, that means the user", "# did not set any index already to the document.", "# So set this index as document index", "if", "document", ".", "_index", ".", "_name", "is", "None", ":", "document", ".", "_index", "=", "self", "return", "document" ]
Associate a :class:`~elasticsearch_dsl.Document` subclass with an index. This means that, when this index is created, it will contain the mappings for the ``Document``. If the ``Document`` class doesn't have a default index yet (by defining ``class Index``), this instance will be used. Can be used as a decorator:: i = Index('blog') @i.document class Post(Document): title = Text() # create the index, including Post mappings i.create() # .search() will now return a Search object that will return # properly deserialized Post instances s = i.search()
[ "Associate", "a", ":", "class", ":", "~elasticsearch_dsl", ".", "Document", "subclass", "with", "an", "index", ".", "This", "means", "that", "when", "this", "index", "is", "created", "it", "will", "contain", "the", "mappings", "for", "the", "Document", ".", "If", "the", "Document", "class", "doesn", "t", "have", "a", "default", "index", "yet", "(", "by", "defining", "class", "Index", ")", "this", "instance", "will", "be", "used", ".", "Can", "be", "used", "as", "a", "decorator", "::" ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L121-L150
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.analyzer
def analyzer(self, *args, **kwargs): """ Explicitly add an analyzer to an index. Note that all custom analyzers defined in mappings will also be created. This is useful for search analyzers. Example:: from elasticsearch_dsl import analyzer, tokenizer my_analyzer = analyzer('my_analyzer', tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3), filter=['lowercase'] ) i = Index('blog') i.analyzer(my_analyzer) """ analyzer = analysis.analyzer(*args, **kwargs) d = analyzer.get_analysis_definition() # empty custom analyzer, probably already defined out of our control if not d: return # merge the definition merge(self._analysis, d, True)
python
def analyzer(self, *args, **kwargs): """ Explicitly add an analyzer to an index. Note that all custom analyzers defined in mappings will also be created. This is useful for search analyzers. Example:: from elasticsearch_dsl import analyzer, tokenizer my_analyzer = analyzer('my_analyzer', tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3), filter=['lowercase'] ) i = Index('blog') i.analyzer(my_analyzer) """ analyzer = analysis.analyzer(*args, **kwargs) d = analyzer.get_analysis_definition() # empty custom analyzer, probably already defined out of our control if not d: return # merge the definition merge(self._analysis, d, True)
[ "def", "analyzer", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "analyzer", "=", "analysis", ".", "analyzer", "(", "*", "args", ",", "*", "*", "kwargs", ")", "d", "=", "analyzer", ".", "get_analysis_definition", "(", ")", "# empty custom analyzer, probably already defined out of our control", "if", "not", "d", ":", "return", "# merge the definition", "merge", "(", "self", ".", "_analysis", ",", "d", ",", "True", ")" ]
Explicitly add an analyzer to an index. Note that all custom analyzers defined in mappings will also be created. This is useful for search analyzers. Example:: from elasticsearch_dsl import analyzer, tokenizer my_analyzer = analyzer('my_analyzer', tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3), filter=['lowercase'] ) i = Index('blog') i.analyzer(my_analyzer)
[ "Explicitly", "add", "an", "analyzer", "to", "an", "index", ".", "Note", "that", "all", "custom", "analyzers", "defined", "in", "mappings", "will", "also", "be", "created", ".", "This", "is", "useful", "for", "search", "analyzers", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L175-L200
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.search
def search(self, using=None): """ Return a :class:`~elasticsearch_dsl.Search` object searching over the index (or all the indices belonging to this template) and its ``Document``\\s. """ return Search( using=using or self._using, index=self._name, doc_type=self._doc_types )
python
def search(self, using=None): """ Return a :class:`~elasticsearch_dsl.Search` object searching over the index (or all the indices belonging to this template) and its ``Document``\\s. """ return Search( using=using or self._using, index=self._name, doc_type=self._doc_types )
[ "def", "search", "(", "self", ",", "using", "=", "None", ")", ":", "return", "Search", "(", "using", "=", "using", "or", "self", ".", "_using", ",", "index", "=", "self", ".", "_name", ",", "doc_type", "=", "self", ".", "_doc_types", ")" ]
Return a :class:`~elasticsearch_dsl.Search` object searching over the index (or all the indices belonging to this template) and its ``Document``\\s.
[ "Return", "a", ":", "class", ":", "~elasticsearch_dsl", ".", "Search", "object", "searching", "over", "the", "index", "(", "or", "all", "the", "indices", "belonging", "to", "this", "template", ")", "and", "its", "Document", "\\\\", "s", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L221-L231
train
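A brief sketch of Index.search in use; the index name and query are assumptions.

from elasticsearch_dsl import Index

blog = Index('blog')  # hypothetical index
s = blog.search().query('match', title='python')
for hit in s:  # iterating executes the search
    print(hit.meta.id, hit.title)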
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.updateByQuery
def updateByQuery(self, using=None): """ Return a :class:`~elasticsearch_dsl.UpdateByQuery` object searching over the index (or all the indices belonging to this template) and updating Documents that match the search criteria. For more information, see here: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html """ return UpdateByQuery( using=using or self._using, index=self._name, )
python
def updateByQuery(self, using=None): """ Return a :class:`~elasticsearch_dsl.UpdateByQuery` object searching over the index (or all the indices belonging to this template) and updating Documents that match the search criteria. For more information, see here: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html """ return UpdateByQuery( using=using or self._using, index=self._name, )
[ "def", "updateByQuery", "(", "self", ",", "using", "=", "None", ")", ":", "return", "UpdateByQuery", "(", "using", "=", "using", "or", "self", ".", "_using", ",", "index", "=", "self", ".", "_name", ",", ")" ]
Return a :class:`~elasticsearch_dsl.UpdateByQuery` object searching over the index (or all the indices belonging to this template) and updating Documents that match the search criteria. For more information, see here: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html
[ "Return", "a", ":", "class", ":", "~elasticsearch_dsl", ".", "UpdateByQuery", "object", "searching", "over", "the", "index", "(", "or", "all", "the", "indices", "belonging", "to", "this", "template", ")", "and", "updating", "Documents", "that", "match", "the", "search", "criteria", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L233-L245
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.create
def create(self, using=None, **kwargs): """ Creates the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.create`` unchanged. """ self._get_connection(using).indices.create(index=self._name, body=self.to_dict(), **kwargs)
python
def create(self, using=None, **kwargs): """ Creates the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.create`` unchanged. """ self._get_connection(using).indices.create(index=self._name, body=self.to_dict(), **kwargs)
[ "def", "create", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "create", "(", "index", "=", "self", ".", "_name", ",", "body", "=", "self", ".", "to_dict", "(", ")", ",", "*", "*", "kwargs", ")" ]
Creates the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.create`` unchanged.
[ "Creates", "the", "index", "in", "elasticsearch", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L247-L254
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.save
def save(self, using=None): """ Sync the index definition with elasticsearch, creating the index if it doesn't exist and updating its settings and mappings if it does. Note some settings and mapping changes cannot be done on an open index (or at all on an existing index) and for those this method will fail with the underlying exception. """ if not self.exists(using=using): return self.create(using=using) body = self.to_dict() settings = body.pop('settings', {}) analysis = settings.pop('analysis', None) current_settings = self.get_settings(using=using)[self._name]['settings']['index'] if analysis: if self.is_closed(using=using): # closed index, update away settings['analysis'] = analysis else: # compare analysis definition, if all analysis objects are # already defined as requested, skip analysis update and # proceed, otherwise raise IllegalOperation existing_analysis = current_settings.get('analysis', {}) if any( existing_analysis.get(section, {}).get(k, None) != analysis[section][k] for section in analysis for k in analysis[section] ): raise IllegalOperation( 'You cannot update analysis configuration on an open index, you need to close index %s first.' % self._name) # try and update the settings if settings: settings = settings.copy() for k, v in list(settings.items()): if k in current_settings and current_settings[k] == str(v): del settings[k] if settings: self.put_settings(using=using, body=settings) # update the mappings, any conflict in the mappings will result in an # exception mappings = body.pop('mappings', {}) if mappings: self.put_mapping(using=using, body=mappings)
python
def save(self, using=None): """ Sync the index definition with elasticsearch, creating the index if it doesn't exist and updating its settings and mappings if it does. Note some settings and mapping changes cannot be done on an open index (or at all on an existing index) and for those this method will fail with the underlying exception. """ if not self.exists(using=using): return self.create(using=using) body = self.to_dict() settings = body.pop('settings', {}) analysis = settings.pop('analysis', None) current_settings = self.get_settings(using=using)[self._name]['settings']['index'] if analysis: if self.is_closed(using=using): # closed index, update away settings['analysis'] = analysis else: # compare analysis definition, if all analysis objects are # already defined as requested, skip analysis update and # proceed, otherwise raise IllegalOperation existing_analysis = current_settings.get('analysis', {}) if any( existing_analysis.get(section, {}).get(k, None) != analysis[section][k] for section in analysis for k in analysis[section] ): raise IllegalOperation( 'You cannot update analysis configuration on an open index, you need to close index %s first.' % self._name) # try and update the settings if settings: settings = settings.copy() for k, v in list(settings.items()): if k in current_settings and current_settings[k] == str(v): del settings[k] if settings: self.put_settings(using=using, body=settings) # update the mappings, any conflict in the mappings will result in an # exception mappings = body.pop('mappings', {}) if mappings: self.put_mapping(using=using, body=mappings)
[ "def", "save", "(", "self", ",", "using", "=", "None", ")", ":", "if", "not", "self", ".", "exists", "(", "using", "=", "using", ")", ":", "return", "self", ".", "create", "(", "using", "=", "using", ")", "body", "=", "self", ".", "to_dict", "(", ")", "settings", "=", "body", ".", "pop", "(", "'settings'", ",", "{", "}", ")", "analysis", "=", "settings", ".", "pop", "(", "'analysis'", ",", "None", ")", "current_settings", "=", "self", ".", "get_settings", "(", "using", "=", "using", ")", "[", "self", ".", "_name", "]", "[", "'settings'", "]", "[", "'index'", "]", "if", "analysis", ":", "if", "self", ".", "is_closed", "(", "using", "=", "using", ")", ":", "# closed index, update away", "settings", "[", "'analysis'", "]", "=", "analysis", "else", ":", "# compare analysis definition, if all analysis objects are", "# already defined as requested, skip analysis update and", "# proceed, otherwise raise IllegalOperation", "existing_analysis", "=", "current_settings", ".", "get", "(", "'analysis'", ",", "{", "}", ")", "if", "any", "(", "existing_analysis", ".", "get", "(", "section", ",", "{", "}", ")", ".", "get", "(", "k", ",", "None", ")", "!=", "analysis", "[", "section", "]", "[", "k", "]", "for", "section", "in", "analysis", "for", "k", "in", "analysis", "[", "section", "]", ")", ":", "raise", "IllegalOperation", "(", "'You cannot update analysis configuration on an open index, you need to close index %s first.'", "%", "self", ".", "_name", ")", "# try and update the settings", "if", "settings", ":", "settings", "=", "settings", ".", "copy", "(", ")", "for", "k", ",", "v", "in", "list", "(", "settings", ".", "items", "(", ")", ")", ":", "if", "k", "in", "current_settings", "and", "current_settings", "[", "k", "]", "==", "str", "(", "v", ")", ":", "del", "settings", "[", "k", "]", "if", "settings", ":", "self", ".", "put_settings", "(", "using", "=", "using", ",", "body", "=", "settings", ")", "# update the mappings, any conflict in the mappings will result in an", "# exception", "mappings", "=", "body", ".", "pop", "(", "'mappings'", ",", "{", "}", ")", "if", "mappings", ":", "self", ".", "put_mapping", "(", "using", "=", "using", ",", "body", "=", "mappings", ")" ]
Sync the index definition with elasticsearch, creating the index if it doesn't exist and updating its settings and mappings if it does. Note some settings and mapping changes cannot be done on an open index (or at all on an existing index) and for those this method will fail with the underlying exception.
[ "Sync", "the", "index", "definition", "with", "elasticsearch", "creating", "the", "index", "if", "it", "doesn", "t", "exist", "and", "updating", "its", "settings", "and", "mappings", "if", "it", "does", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L260-L307
train
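A sketch of the save() flow under the semantics described above, assuming a default connection and a hypothetical 'blog' index.

from elasticsearch_dsl import Index, connections

connections.create_connection(hosts=['localhost'])  # assumed cluster

i = Index('blog')  # hypothetical index
i.settings(number_of_replicas=0)
i.save()  # index absent, so this falls through to create()

i.settings(number_of_replicas=1)
i.save()  # index present: only the changed setting is pushed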
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.analyze
def analyze(self, using=None, **kwargs): """ Perform the analysis process on a text and return the tokens breakdown of the text. Any additional keyword arguments will be passed to ``Elasticsearch.indices.analyze`` unchanged. """ return self._get_connection(using).indices.analyze(index=self._name, **kwargs)
python
def analyze(self, using=None, **kwargs): """ Perform the analysis process on a text and return the tokens breakdown of the text. Any additional keyword arguments will be passed to ``Elasticsearch.indices.analyze`` unchanged. """ return self._get_connection(using).indices.analyze(index=self._name, **kwargs)
[ "def", "analyze", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "analyze", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Perform the analysis process on a text and return the tokens breakdown of the text. Any additional keyword arguments will be passed to ``Elasticsearch.indices.analyze`` unchanged.
[ "Perform", "the", "analysis", "process", "on", "a", "text", "and", "return", "the", "tokens", "breakdown", "of", "the", "text", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L309-L317
train
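How analyze() might be called; assumes a default connection, with the body passed straight through to Elasticsearch.indices.analyze.

from elasticsearch_dsl import Index

tokens = Index('blog').analyze(body={  # hypothetical index
    'analyzer': 'standard',
    'text': 'Hello World!',
})
print([t['token'] for t in tokens['tokens']])  # -> ['hello', 'world']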
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.refresh
def refresh(self, using=None, **kwargs): """ Performs a refresh operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.refresh`` unchanged. """ return self._get_connection(using).indices.refresh(index=self._name, **kwargs)
python
def refresh(self, using=None, **kwargs): """ Performs a refresh operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.refresh`` unchanged. """ return self._get_connection(using).indices.refresh(index=self._name, **kwargs)
[ "def", "refresh", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "refresh", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Performs a refresh operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.refresh`` unchanged.
[ "Performs", "a", "refresh", "operation", "on", "the", "index", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L319-L326
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.flush
def flush(self, using=None, **kwargs): """ Performs a flush operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.flush`` unchanged. """ return self._get_connection(using).indices.flush(index=self._name, **kwargs)
python
def flush(self, using=None, **kwargs): """ Performs a flush operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.flush`` unchanged. """ return self._get_connection(using).indices.flush(index=self._name, **kwargs)
[ "def", "flush", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "flush", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Performs a flush operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.flush`` unchanged.
[ "Performs", "a", "flush", "operation", "on", "the", "index", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L328-L335
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.get
def get(self, using=None, **kwargs): """ The get index API allows you to retrieve information about the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get`` unchanged. """ return self._get_connection(using).indices.get(index=self._name, **kwargs)
python
def get(self, using=None, **kwargs): """ The get index API allows you to retrieve information about the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get`` unchanged. """ return self._get_connection(using).indices.get(index=self._name, **kwargs)
[ "def", "get", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "get", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
The get index API allows you to retrieve information about the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get`` unchanged.
[ "The", "get", "index", "API", "allows", "you", "to", "retrieve", "information", "about", "the", "index", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L337-L344
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.open
def open(self, using=None, **kwargs): """ Opens the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.open`` unchanged. """ return self._get_connection(using).indices.open(index=self._name, **kwargs)
python
def open(self, using=None, **kwargs): """ Opens the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.open`` unchanged. """ return self._get_connection(using).indices.open(index=self._name, **kwargs)
[ "def", "open", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "open", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Opens the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.open`` unchanged.
[ "Opens", "the", "index", "in", "elasticsearch", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L346-L353
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.close
def close(self, using=None, **kwargs): """ Closes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.close`` unchanged. """ return self._get_connection(using).indices.close(index=self._name, **kwargs)
python
def close(self, using=None, **kwargs): """ Closes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.close`` unchanged. """ return self._get_connection(using).indices.close(index=self._name, **kwargs)
[ "def", "close", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "close", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Closes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.close`` unchanged.
[ "Closes", "the", "index", "in", "elasticsearch", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L355-L362
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.delete
def delete(self, using=None, **kwargs): """ Deletes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.delete`` unchanged. """ return self._get_connection(using).indices.delete(index=self._name, **kwargs)
python
def delete(self, using=None, **kwargs): """ Deletes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.delete`` unchanged. """ return self._get_connection(using).indices.delete(index=self._name, **kwargs)
[ "def", "delete", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "delete", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Deletes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.delete`` unchanged.
[ "Deletes", "the", "index", "in", "elasticsearch", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L364-L371
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.exists
def exists(self, using=None, **kwargs): """ Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged. """ return self._get_connection(using).indices.exists(index=self._name, **kwargs)
python
def exists(self, using=None, **kwargs): """ Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged. """ return self._get_connection(using).indices.exists(index=self._name, **kwargs)
[ "def", "exists", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "exists", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged.
[ "Returns", "True", "if", "the", "index", "already", "exists", "in", "elasticsearch", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L373-L380
train
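A common guard built on exists(), sketched with an assumed index name and default connection.

from elasticsearch_dsl import Index

i = Index('blog')  # hypothetical index
if not i.exists():
    i.create()  # create only when missing, avoiding an error on re-create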
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.exists_type
def exists_type(self, using=None, **kwargs): """ Check if a type/types exists in the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists_type`` unchanged. """ return self._get_connection(using).indices.exists_type(index=self._name, **kwargs)
python
def exists_type(self, using=None, **kwargs): """ Check if a type/types exists in the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists_type`` unchanged. """ return self._get_connection(using).indices.exists_type(index=self._name, **kwargs)
[ "def", "exists_type", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "exists_type", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Check if a type/types exists in the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists_type`` unchanged.
[ "Check", "if", "a", "type", "/", "types", "exists", "in", "the", "index", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L382-L389
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.put_mapping
def put_mapping(self, using=None, **kwargs): """ Register a specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_mapping`` unchanged. """ return self._get_connection(using).indices.put_mapping(index=self._name, **kwargs)
python
def put_mapping(self, using=None, **kwargs): """ Register a specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_mapping`` unchanged. """ return self._get_connection(using).indices.put_mapping(index=self._name, **kwargs)
[ "def", "put_mapping", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "put_mapping", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Register a specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_mapping`` unchanged.
[ "Register", "a", "specific", "mapping", "definition", "for", "a", "specific", "type", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L391-L398
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.get_mapping
def get_mapping(self, using=None, **kwargs): """ Retrieve a specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_mapping`` unchanged. """ return self._get_connection(using).indices.get_mapping(index=self._name, **kwargs)
python
def get_mapping(self, using=None, **kwargs): """ Retrieve a specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_mapping`` unchanged. """ return self._get_connection(using).indices.get_mapping(index=self._name, **kwargs)
[ "def", "get_mapping", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "get_mapping", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Retrieve a specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_mapping`` unchanged.
[ "Retrieve", "a", "specific", "mapping", "definition", "for", "a", "specific", "type", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L400-L407
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
Index.get_field_mapping
def get_field_mapping(self, using=None, **kwargs): """ Retrieve the mapping definition of a specific field. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_field_mapping`` unchanged. """ return self._get_connection(using).indices.get_field_mapping(index=self._name, **kwargs)
python
def get_field_mapping(self, using=None, **kwargs): """ Retrieve the mapping definition of a specific field. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_field_mapping`` unchanged. """ return self._get_connection(using).indices.get_field_mapping(index=self._name, **kwargs)
[ "def", "get_field_mapping", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "get_field_mapping", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
Retrieve the mapping definition of a specific field. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_field_mapping`` unchanged.
[ "Retrieve", "the", "mapping", "definition", "of", "a", "specific", "field", "." ]
874b52472fc47b601de0e5fa0e4300e21aff0085
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L409-L416
train
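A sketch of get_field_mapping with a hypothetical 'title' field; extra kwargs go straight to the underlying client call, and the response nests per index and field.

from elasticsearch_dsl import Index

mapping = Index('blog').get_field_mapping(fields='title')  # hypothetical field
# roughly: {'blog': {'mappings': {...: {'title': {'full_name': 'title',
#                                                 'mapping': {...}}}}}}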