Dataset columns: `after_merge` (string, 28–79.6k chars), `before_merge` (string, 20–79.6k chars), `url` (string, 38–71 chars), `full_traceback` (string, 43–922k chars), `traceback_type` (string, 555 classes).

| after_merge | before_merge | url | full_traceback | traceback_type |
|---|---|---|---|---|
def unpack(g):
def _unpack_inner(f):
@functools.wraps(f)
def _call(*args, **kwargs):
gargs, gkwargs = g(*args, **kwargs)
return f(*gargs, **gkwargs)
return _call
return _unpack_inner
|
def unpack(g):
def _unpack_inner(f):
@functools.wraps(f)
def _call(**kwargs):
return f(**g(**kwargs))
return _call
return _unpack_inner
|
https://github.com/awslabs/autogluon/issues/575
|
Loaded data from: https://autogluon.s3.amazonaws.com/datasets/AdultIncomeBinaryClassification/train_data.csv | Columns = 15 / 15 | Rows = 39073 -> 39073
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2666bea8bf44> in <module>
3 # train_path = data_dir + "titanic_clean.csv"
4 train_data = task.Dataset(file_path=train_path)
----> 5 predictor = task.fit(train_data, label='survived', output_directory='ag-example-out/')
TypeError: _call() takes 0 positional arguments but 1 was given
|
TypeError
|
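The fix in this row is the canonical pattern for decorators that must pass positional arguments through: the before_merge `_call(**kwargs)` rejects any positional call, which is exactly the `TypeError: _call() takes 0 positional arguments but 1 was given` in the traceback. A minimal, self-contained sketch of the fixed pattern (the `add_default_label` helper and `fit` function are hypothetical, for illustration only):

```python
import functools

def unpack(g):
    """Decorator factory: route f's arguments through g before calling f."""
    def _unpack_inner(f):
        @functools.wraps(f)
        def _call(*args, **kwargs):          # fixed: accept positional args too
            gargs, gkwargs = g(*args, **kwargs)
            return f(*gargs, **gkwargs)
        return _call
    return _unpack_inner

def add_default_label(*args, **kwargs):      # hypothetical g, for illustration
    kwargs.setdefault("label", "class")
    return args, kwargs

@unpack(add_default_label)
def fit(data, label=None):
    return data, label

print(fit("train.csv"))   # positional call now works: ('train.csv', 'class')
```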
def _unpack_inner(f):
@functools.wraps(f)
def _call(*args, **kwargs):
gargs, gkwargs = g(*args, **kwargs)
return f(*gargs, **gkwargs)
return _call
|
def _unpack_inner(f):
@functools.wraps(f)
def _call(**kwargs):
return f(**g(**kwargs))
return _call
|
https://github.com/awslabs/autogluon/issues/575
|
Loaded data from: https://autogluon.s3.amazonaws.com/datasets/AdultIncomeBinaryClassification/train_data.csv | Columns = 15 / 15 | Rows = 39073 -> 39073
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2666bea8bf44> in <module>
3 # train_path = data_dir + "titanic_clean.csv"
4 train_data = task.Dataset(file_path=train_path)
----> 5 predictor = task.fit(train_data, label='survived', output_directory='ag-example-out/')
TypeError: _call() takes 0 positional arguments but 1 was given
|
TypeError
|
def _call(*args, **kwargs):
gargs, gkwargs = g(*args, **kwargs)
return f(*gargs, **gkwargs)
|
def _call(**kwargs):
return f(**g(**kwargs))
|
https://github.com/awslabs/autogluon/issues/575
|
Loaded data from: https://autogluon.s3.amazonaws.com/datasets/AdultIncomeBinaryClassification/train_data.csv | Columns = 15 / 15 | Rows = 39073 -> 39073
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2666bea8bf44> in <module>
3 # train_path = data_dir + "titanic_clean.csv"
4 train_data = task.Dataset(file_path=train_path)
----> 5 predictor = task.fit(train_data, label='survived', output_directory='ag-example-out/')
TypeError: _call() takes 0 positional arguments but 1 was given
|
TypeError
|
def set_presets(*args, **kwargs):
if "presets" in kwargs:
presets = kwargs["presets"]
if presets is None:
return args, kwargs
if not isinstance(presets, list):
presets = [presets]
preset_kwargs = {}
for preset in presets:
if isinstance(preset, str):
preset_orig = preset
preset = preset_dict.get(preset, None)
if preset is None:
raise ValueError(
f"Preset '{preset_orig}' was not found. Valid presets: {list(preset_dict.keys())}"
)
if isinstance(preset, dict):
for key in preset:
preset_kwargs[key] = preset[key]
else:
raise TypeError(
f"Preset of type {type(preset)} was given, but only presets of type [dict, str] are valid."
)
for key in preset_kwargs:
if key not in kwargs:
kwargs[key] = preset_kwargs[key]
return args, kwargs
|
def set_presets(**kwargs):
if "presets" in kwargs:
presets = kwargs["presets"]
if presets is None:
return kwargs
if not isinstance(presets, list):
presets = [presets]
preset_kwargs = {}
for preset in presets:
if isinstance(preset, str):
preset_orig = preset
preset = preset_dict.get(preset, None)
if preset is None:
raise ValueError(
f"Preset '{preset_orig}' was not found. Valid presets: {list(preset_dict.keys())}"
)
if isinstance(preset, dict):
for key in preset:
preset_kwargs[key] = preset[key]
else:
raise TypeError(
f"Preset of type {type(preset)} was given, but only presets of type [dict, str] are valid."
)
for key in preset_kwargs:
if key not in kwargs:
kwargs[key] = preset_kwargs[key]
return kwargs
|
https://github.com/awslabs/autogluon/issues/575
|
Loaded data from: https://autogluon.s3.amazonaws.com/datasets/AdultIncomeBinaryClassification/train_data.csv | Columns = 15 / 15 | Rows = 39073 -> 39073
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2666bea8bf44> in <module>
3 # train_path = data_dir + "titanic_clean.csv"
4 train_data = task.Dataset(file_path=train_path)
----> 5 predictor = task.fit(train_data, label='survived', output_directory='ag-example-out/')
TypeError: _call() takes 0 positional arguments but 1 was given
|
TypeError
|
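This row is the same issue-575 fix one level down: `set_presets` is the `g` consumed by the `unpack` decorator above, so it must accept and return positional arguments as well (note the early return is normalized to `(args, kwargs)` too). A runnable sketch of the merge semantics, assuming a hypothetical `preset_dict`:

```python
# Sketch of the preset-merge behaviour; preset_dict is illustrative.
preset_dict = {"best_quality": {"auto_stack": True, "time_limit": 3600}}

def set_presets(*args, **kwargs):
    presets = kwargs.get("presets")
    if presets is None:
        return args, kwargs
    if not isinstance(presets, list):
        presets = [presets]
    preset_kwargs = {}
    for preset in presets:
        if isinstance(preset, str):
            preset = preset_dict[preset]   # error handling elided
        preset_kwargs.update(preset)
    for key, value in preset_kwargs.items():
        kwargs.setdefault(key, value)      # explicit kwargs win over presets
    return args, kwargs

args, kwargs = set_presets("train.csv", presets="best_quality", time_limit=60)
print(kwargs)  # {'presets': 'best_quality', 'time_limit': 60, 'auto_stack': True}
```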
def generate_features(self, X: DataFrame):
if not self.fit:
self._compute_feature_transformations()
X_features = pd.DataFrame(index=X.index)
for column in X.columns:
if X[column].dtype.name == "object":
X[column].fillna("", inplace=True)
else:
X[column].fillna(np.nan, inplace=True)
X_text_features_combined = []
if self.feature_transformations["text_special"]:
for nlp_feature in self.feature_transformations["text_special"]:
X_text_features = self.generate_text_special(X[nlp_feature], nlp_feature)
X_text_features_combined.append(X_text_features)
X_text_features_combined = pd.concat(X_text_features_combined, axis=1)
X = self.preprocess(X)
if self.feature_transformations["raw"]:
X_features = X_features.join(X[self.feature_transformations["raw"]])
if self.feature_transformations["category"]:
X_categoricals = X[self.feature_transformations["category"]]
# TODO: Add stateful categorical generator, merge rare cases to an unknown value
# TODO: What happens when training set has no unknown/rare values but test set does? What models can handle this?
if "text" in self.feature_type_family:
self.feature_type_family_generated["text_as_category"] += (
self.feature_type_family["text"]
)
X_categoricals = X_categoricals.astype("category")
X_features = X_features.join(X_categoricals)
if self.feature_transformations["text_special"]:
if not self.fit:
self.features_binned += list(X_text_features_combined.columns)
self.feature_type_family_generated["text_special"] += list(
X_text_features_combined.columns
)
X_features = X_features.join(X_text_features_combined)
if self.feature_transformations["datetime"]:
for datetime_feature in self.feature_transformations["datetime"]:
X_features[datetime_feature] = pd.to_datetime(X[datetime_feature])
X_features[datetime_feature] = pd.to_numeric(
X_features[datetime_feature]
) # TODO: Use actual date info
self.feature_type_family_generated["datetime"].append(datetime_feature)
# TODO: Add fastai date features
if self.feature_transformations["text_ngram"]:
# Combine Text Fields
features_nlp_current = ["__nlp__"]
if not self.fit:
features_nlp_to_remove = []
logger.log(
15,
"Fitting vectorizer for text features: "
+ str(self.feature_transformations["text_ngram"]),
)
for nlp_feature in features_nlp_current:
# TODO: Preprocess text?
if nlp_feature == "__nlp__":
text_list = list(
set(
[
". ".join(row)
for row in X[
self.feature_transformations["text_ngram"]
].values
]
)
)
else:
text_list = list(X[nlp_feature].drop_duplicates().values)
vectorizer_raw = copy.deepcopy(self.vectorizer_default_raw)
try:
vectorizer_fit, _ = self.train_vectorizer(text_list, vectorizer_raw)
self.vectorizers.append(vectorizer_fit)
except ValueError:
logger.debug("Removing 'text_ngram' features due to error")
features_nlp_to_remove = self.feature_transformations["text_ngram"]
self.feature_transformations["text_ngram"] = [
feature
for feature in self.feature_transformations["text_ngram"]
if feature not in features_nlp_to_remove
]
X_features_cols_prior_to_nlp = list(X_features.columns)
downsample_ratio = None
nlp_failure_count = 0
keep_trying_nlp = True
while keep_trying_nlp:
try:
X_nlp_features_combined = self.generate_text_ngrams(
X=X,
features_nlp_current=features_nlp_current,
downsample_ratio=downsample_ratio,
)
if self.feature_transformations["text_ngram"]:
X_features = X_features.join(X_nlp_features_combined)
if not self.fit:
self.feature_type_family_generated["text_ngram"] += list(
X_nlp_features_combined.columns
)
keep_trying_nlp = False
except Exception as err:
nlp_failure_count += 1
if self.fit:
logger.exception(
"Error: OOM error during NLP feature transform, unrecoverable. Increase memory allocation or reduce data size to avoid this error."
)
raise
traceback.print_tb(err.__traceback__)
X_features = X_features[X_features_cols_prior_to_nlp]
skip_nlp = False
for vectorizer in self.vectorizers:
vocab_size = len(vectorizer.vocabulary_)
if vocab_size <= 50:
skip_nlp = True
break
else:
if nlp_failure_count >= 3:
skip_nlp = True
if skip_nlp:
logger.log(
15,
"Warning: ngrams generation resulted in OOM error, removing ngrams features. If you want to use ngrams for this problem, increase memory allocation for AutoGluon.",
)
logger.debug(str(err))
self.vectorizers = []
if "text_ngram" in self.feature_transformations:
self.feature_transformations.pop("text_ngram")
if "text_ngram" in self.feature_type_family_generated:
self.feature_type_family_generated.pop("text_ngram")
self.enable_nlp_features = False
keep_trying_nlp = False
else:
logger.log(
15,
"Warning: ngrams generation resulted in OOM error, attempting to reduce ngram feature count. If you want to optimally use ngrams for this problem, increase memory allocation for AutoGluon.",
)
logger.debug(str(err))
downsample_ratio = 0.25
return X_features
|
def generate_features(self, X: DataFrame):
if not self.fit:
self._compute_feature_transformations()
X_features = pd.DataFrame(index=X.index)
for column in X.columns:
if X[column].dtype.name == "object":
X[column].fillna("", inplace=True)
else:
X[column].fillna(np.nan, inplace=True)
X_text_features_combined = []
if self.feature_transformations["text_special"]:
for nlp_feature in self.feature_transformations["text_special"]:
X_text_features = self.generate_text_features(X[nlp_feature], nlp_feature)
X_text_features_combined.append(X_text_features)
X_text_features_combined = pd.concat(X_text_features_combined, axis=1)
X = self.preprocess(X)
if self.feature_transformations["raw"]:
X_features = X_features.join(X[self.feature_transformations["raw"]])
if self.feature_transformations["category"]:
X_categoricals = X[self.feature_transformations["category"]]
# TODO: Add stateful categorical generator, merge rare cases to an unknown value
# TODO: What happens when training set has no unknown/rare values but test set does? What models can handle this?
if "text" in self.feature_type_family:
self.feature_type_family_generated["text_as_category"] += (
self.feature_type_family["text"]
)
X_categoricals = X_categoricals.astype("category")
X_features = X_features.join(X_categoricals)
if self.feature_transformations["text_special"]:
if not self.fit:
self.features_binned += list(X_text_features_combined.columns)
self.feature_type_family_generated["text_special"] += list(
X_text_features_combined.columns
)
X_features = X_features.join(X_text_features_combined)
if self.feature_transformations["datetime"]:
for datetime_feature in self.feature_transformations["datetime"]:
X_features[datetime_feature] = pd.to_datetime(X[datetime_feature])
X_features[datetime_feature] = pd.to_numeric(
X_features[datetime_feature]
) # TODO: Use actual date info
self.feature_type_family_generated["datetime"].append(datetime_feature)
# TODO: Add fastai date features
if self.feature_transformations["text_ngram"]:
# Combine Text Fields
features_nlp_current = ["__nlp__"]
if not self.fit:
features_nlp_to_remove = []
logger.log(
15,
"Fitting vectorizer for text features: "
+ str(self.feature_transformations["text_ngram"]),
)
for nlp_feature in features_nlp_current:
# TODO: Preprocess text?
if nlp_feature == "__nlp__":
text_list = list(
set(
[
". ".join(row)
for row in X[
self.feature_transformations["text_ngram"]
].values
]
)
)
else:
text_list = list(X[nlp_feature].drop_duplicates().values)
vectorizer_raw = copy.deepcopy(self.vectorizer_default_raw)
try:
vectorizer_fit, _ = self.train_vectorizer(text_list, vectorizer_raw)
self.vectorizers.append(vectorizer_fit)
except ValueError:
logger.debug("Removing 'text_ngram' features due to error")
features_nlp_to_remove = self.feature_transformations["text_ngram"]
self.feature_transformations["text_ngram"] = [
feature
for feature in self.feature_transformations["text_ngram"]
if feature not in features_nlp_to_remove
]
X_features_cols_prior_to_nlp = list(X_features.columns)
downsample_ratio = None
nlp_failure_count = 0
keep_trying_nlp = True
while keep_trying_nlp:
try:
X_nlp_features_combined = self.generate_text_ngrams(
X=X,
features_nlp_current=features_nlp_current,
downsample_ratio=downsample_ratio,
)
if self.feature_transformations["text_ngram"]:
X_features = X_features.join(X_nlp_features_combined)
if not self.fit:
self.feature_type_family_generated["text_ngram"] += list(
X_nlp_features_combined.columns
)
keep_trying_nlp = False
except Exception as err:
nlp_failure_count += 1
if self.fit:
logger.exception(
"Error: OOM error during NLP feature transform, unrecoverable. Increase memory allocation or reduce data size to avoid this error."
)
raise
traceback.print_tb(err.__traceback__)
X_features = X_features[X_features_cols_prior_to_nlp]
skip_nlp = False
for vectorizer in self.vectorizers:
vocab_size = len(vectorizer.vocabulary_)
if vocab_size <= 50:
skip_nlp = True
break
else:
if nlp_failure_count >= 3:
skip_nlp = True
if skip_nlp:
logger.log(
15,
"Warning: ngrams generation resulted in OOM error, removing ngrams features. If you want to use ngrams for this problem, increase memory allocation for AutoGluon.",
)
logger.debug(str(err))
self.vectorizers = []
if "text_ngram" in self.feature_transformations:
self.feature_transformations.pop("text_ngram")
if "text_ngram" in self.feature_type_family_generated:
self.feature_type_family_generated.pop("text_ngram")
self.enable_nlp_features = False
keep_trying_nlp = False
else:
logger.log(
15,
"Warning: ngrams generation resulted in OOM error, attempting to reduce ngram feature count. If you want to optimally use ngrams for this problem, increase memory allocation for AutoGluon.",
)
logger.debug(str(err))
downsample_ratio = 0.25
return X_features
|
https://github.com/awslabs/autogluon/issues/575
|
Loaded data from: https://autogluon.s3.amazonaws.com/datasets/AdultIncomeBinaryClassification/train_data.csv | Columns = 15 / 15 | Rows = 39073 -> 39073
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-2666bea8bf44> in <module>
3 # train_path = data_dir + "titanic_clean.csv"
4 train_data = task.Dataset(file_path=train_path)
----> 5 predictor = task.fit(train_data, label='survived', output_directory='ag-example-out/')
TypeError: _call() takes 0 positional arguments but 1 was given
|
TypeError
|
def Dataset(
path=None, name=None, train=True, input_size=224, crop_ratio=0.875, *args, **kwargs
):
"""Dataset for AutoGluon image classification tasks.
May either be a :class:`autogluon.task.image_classification.ImageFolderDataset`, :class:`autogluon.task.image_classification.RecordDataset`,
or a popular dataset already built into AutoGluon ('mnist', 'fashionmnist', 'cifar10', 'cifar100', 'imagenet').
Parameters
----------
path : str, optional
The data location. If using :class:`ImageFolderDataset`,
image folder `path/to/the/folder` should be provided.
If using :class:`RecordDataset`, the `path/to/*.rec` should be provided.
name : str, optional
Which built-in dataset to use, will override all other options if specified.
The options are: 'mnist', 'fashionmnist', 'cifar', 'cifar10', 'cifar100', 'imagenet'
train : bool, optional, default = True
Whether this dataset should be used for training or validation.
input_size : int
The input image size.
crop_ratio : float
Center crop ratio (for evaluation only).
Returns
-------
Dataset object that can be passed to `task.fit()`, which is actually an :class:`autogluon.space.AutoGluonObject`.
To interact with such an object yourself, you must first call `Dataset.init()` to instantiate the object in Python.
"""
if name is None:
if path is None:
raise ValueError(
"Either `path` or `name` must be present in Dataset(). "
"If `name` is provided, it will override the rest of the arguments."
)
return get_dataset(
path=path,
train=train,
name=name,
input_size=input_size,
crop_ratio=crop_ratio,
*args,
**kwargs,
)
|
def Dataset(*args, **kwargs):
"""Dataset for AutoGluon image classification tasks.
May either be a :class:`autogluon.task.image_classification.ImageFolderDataset`, :class:`autogluon.task.image_classification.RecordDataset`,
or a popular dataset already built into AutoGluon ('mnist', 'fashionmnist', 'cifar10', 'cifar100', 'imagenet').
Parameters
----------
name : str, optional
Which built-in dataset to use, will override all other options if specified.
The options are: 'mnist', 'fashionmnist', 'cifar', 'cifar10', 'cifar100', 'imagenet'
train : bool, default = True
Whether this dataset should be used for training or validation.
train_path : str
The training data location. If using :class:`ImageFolderDataset`,
image folder `path/to/the/folder` should be provided.
If using :class:`RecordDataset`, the `path/to/*.rec` should be provided.
input_size : int
The input image size.
crop_ratio : float
Center crop ratio (for evaluation only).
Returns
-------
Dataset object that can be passed to `task.fit()`, which is actually an :class:`autogluon.space.AutoGluonObject`.
To interact with such an object yourself, you must first call `Dataset.init()` to instantiate the object in Python.
"""
return get_dataset(*args, **kwargs)
|
https://github.com/awslabs/autogluon/issues/218
|
scheduler: FIFOScheduler(
DistributedResourceManager{
(Remote: Remote REMOTE_ID: 0,
<Remote: 'inproc://172.18.30.18/28461/1' processes=1 threads=4, memory=7.28 GB>, Resource: NodeResourceManager(4 CPUs, 0 GPUs))
})
Starting Experiments
Num of Finished Tasks is 0
Num of Pending Tasks is 4
25%|█████████████████████▌ | 1/4 [00:00<00:00, 8.97it/s]Process Process-2:
Traceback (most recent call last):
File "/home/z/anaconda3/envs/IC2/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/home/z/anaconda3/envs/IC2/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/scheduler/scheduler.py", line 125, in _worker
ret = fn(**kwargs)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/core/decorator.py", line 52, in __call__
args = sample_config(args, new_config)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/core/decorator.py", line 38, in sample_config
args_dict[k] = v.init()
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/core/space.py", line 110, in init
return self.sample(**config)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/core/decorator.py", line 185, in sample
return self.func(*self.args, **kwargs)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/task/image_classification/dataset.py", line 108, in get_dataset
if '.rec' in path:
TypeError: argument of type 'NoneType' is not iterable
|
TypeError
|
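The traceback in this row comes from `path` silently defaulting to `None` and only failing deep inside `get_dataset` at `'.rec' in path`. The after_merge code fails fast at the call boundary instead. A minimal sketch of that guard (the dict return stands in for the real `get_dataset`):

```python
# Fail-fast argument validation, as in the fixed Dataset(); names illustrative.
def Dataset(path=None, name=None, **kwargs):
    if name is None and path is None:
        raise ValueError(
            "Either `path` or `name` must be present in Dataset(). "
            "If `name` is provided, it will override the rest of the arguments."
        )
    return {"path": path, "name": name, **kwargs}

Dataset(name="mnist")        # ok
Dataset(path="data/train")   # ok
# Dataset()                  # raises ValueError instead of a late TypeError
```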
def auto_suggest_network(dataset, net):
if isinstance(dataset, str):
dataset_name = dataset
elif isinstance(dataset, AutoGluonObject):
if "name" in dataset.kwargs and dataset.kwargs["name"] is not None:
dataset_name = dataset.kwargs["name"]
else:
return net
else:
return net
dataset_name = dataset_name.lower()
if "mnist" in dataset_name:
if isinstance(net, str) or isinstance(net, Categorical):
net = mnist_net()
logger.info(
"Auto suggesting network net for dataset {}".format(net, dataset_name)
)
return net
elif "cifar" in dataset_name:
if isinstance(net, str):
if "cifar" not in net:
net = "cifar_resnet20_v1"
elif isinstance(net, Categorical):
newdata = []
for x in net.data:
if "cifar" in x:
newdata.append(x)
net.data = (
newdata
if len(newdata) > 0
else ["cifar_resnet20_v1", "cifar_resnet56_v1"]
)
logger.info(
"Auto suggesting network net for dataset {}".format(net, dataset_name)
)
return net
|
def auto_suggest_network(dataset, net):
if isinstance(dataset, str):
dataset_name = dataset
elif isinstance(dataset, AutoGluonObject):
if "name" in dataset.kwargs:
dataset_name = dataset.kwargs["name"]
else:
return net
else:
return net
dataset_name = dataset_name.lower()
if "mnist" in dataset_name:
if isinstance(net, str) or isinstance(net, Categorical):
net = mnist_net()
logger.info(
"Auto suggesting network net for dataset {}".format(net, dataset_name)
)
return net
elif "cifar" in dataset_name:
if isinstance(net, str):
if "cifar" not in net:
net = "cifar_resnet20_v1"
elif isinstance(net, Categorical):
newdata = []
for x in net.data:
if "cifar" in x:
newdata.append(x)
net.data = (
newdata
if len(newdata) > 0
else ["cifar_resnet20_v1", "cifar_resnet56_v1"]
)
logger.info(
"Auto suggesting network net for dataset {}".format(net, dataset_name)
)
return net
|
https://github.com/awslabs/autogluon/issues/218
|
scheduler: FIFOScheduler(
DistributedResourceManager{
(Remote: Remote REMOTE_ID: 0,
<Remote: 'inproc://172.18.30.18/28461/1' processes=1 threads=4, memory=7.28 GB>, Resource: NodeResourceManager(4 CPUs, 0 GPUs))
})
Starting Experiments
Num of Finished Tasks is 0
Num of Pending Tasks is 4
25%|█████████████████████▌ | 1/4 [00:00<00:00, 8.97it/s]Process Process-2:
Traceback (most recent call last):
File "/home/z/anaconda3/envs/IC2/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/home/z/anaconda3/envs/IC2/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/scheduler/scheduler.py", line 125, in _worker
ret = fn(**kwargs)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/core/decorator.py", line 52, in __call__
args = sample_config(args, new_config)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/core/decorator.py", line 38, in sample_config
args_dict[k] = v.init()
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/core/space.py", line 110, in init
return self.sample(**config)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/core/decorator.py", line 185, in sample
return self.func(*self.args, **kwargs)
File "/home/z/anaconda3/envs/IC2/lib/python3.6/site-packages/autogluon/task/image_classification/dataset.py", line 108, in get_dataset
if '.rec' in path:
TypeError: argument of type 'NoneType' is not iterable
|
TypeError
|
def balanced_accuracy(solution, prediction):
y_type, solution, prediction = _check_targets(solution, prediction)
if y_type not in ["binary", "multiclass", "multilabel-indicator"]:
raise ValueError(f"{y_type} is not supported")
if y_type == "binary":
# Do not transform into any multiclass representation
pass
elif y_type == "multiclass":
n = len(solution)
unique_sol, encoded_sol = np.unique(solution, return_inverse=True)
unique_pred, encoded_pred = np.unique(prediction, return_inverse=True)
classes = np.unique(np.concatenate((unique_sol, unique_pred)))
map_sol = np.array([np.where(classes == c)[0][0] for c in unique_sol])
map_pred = np.array([np.where(classes == c)[0][0] for c in unique_pred])
# one hot encoding
sol_ohe = np.zeros((n, len(classes)))
pred_ohe = np.zeros((n, len(classes)))
sol_ohe[np.arange(n), map_sol[encoded_sol]] = 1
pred_ohe[np.arange(n), map_pred[encoded_pred]] = 1
solution = sol_ohe
prediction = pred_ohe
elif y_type == "multilabel-indicator":
solution = solution.toarray()
prediction = prediction.toarray()
else:
raise NotImplementedError(f"bac_metric does not support task type {y_type}")
fn = np.sum(np.multiply(solution, (1 - prediction)), axis=0, dtype=float)
tp = np.sum(np.multiply(solution, prediction), axis=0, dtype=float)
# Bounding to avoid division by 0
eps = 1e-15
tp = np.maximum(eps, tp)
pos_num = np.maximum(eps, tp + fn)
tpr = tp / pos_num # true positive rate (sensitivity)
if y_type in ("binary", "multilabel-indicator"):
tn = np.sum(np.multiply((1 - solution), (1 - prediction)), axis=0, dtype=float)
fp = np.sum(np.multiply((1 - solution), prediction), axis=0, dtype=float)
tn = np.maximum(eps, tn)
neg_num = np.maximum(eps, tn + fp)
tnr = tn / neg_num # true negative rate (specificity)
bac = 0.5 * (tpr + tnr)
elif y_type == "multiclass":
bac = tpr
else:
raise ValueError(y_type)
return np.mean(bac) # average over all classes
|
def balanced_accuracy(solution, prediction):
y_type, solution, prediction = _check_targets(solution, prediction)
if y_type not in ["binary", "multiclass", "multilabel-indicator"]:
raise ValueError(f"{y_type} is not supported")
if y_type == "binary":
# Do not transform into any multiclass representation
pass
elif y_type == "multiclass":
# Need to create a multiclass solution and a multiclass predictions
max_class = int(np.max((np.max(solution), np.max(prediction))))
solution_binary = np.zeros((len(solution), max_class + 1))
prediction_binary = np.zeros((len(prediction), max_class + 1))
for i in range(len(solution)):
solution_binary[i, int(solution[i])] = 1
prediction_binary[i, int(prediction[i])] = 1
solution = solution_binary
prediction = prediction_binary
elif y_type == "multilabel-indicator":
solution = solution.toarray()
prediction = prediction.toarray()
else:
raise NotImplementedError(f"bac_metric does not support task type {y_type}")
fn = np.sum(np.multiply(solution, (1 - prediction)), axis=0, dtype=float)
tp = np.sum(np.multiply(solution, prediction), axis=0, dtype=float)
# Bounding to avoid division by 0
eps = 1e-15
tp = np.maximum(eps, tp)
pos_num = np.maximum(eps, tp + fn)
tpr = tp / pos_num # true positive rate (sensitivity)
if y_type in ("binary", "multilabel-indicator"):
tn = np.sum(np.multiply((1 - solution), (1 - prediction)), axis=0, dtype=float)
fp = np.sum(np.multiply((1 - solution), prediction), axis=0, dtype=float)
tn = np.maximum(eps, tn)
neg_num = np.maximum(eps, tn + fp)
tnr = tn / neg_num # true negative rate (specificity)
bac = 0.5 * (tpr + tnr)
elif y_type == "multiclass":
bac = tpr
else:
raise ValueError(y_type)
return np.mean(bac) # average over all classes
|
https://github.com/awslabs/autogluon/issues/378
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-28-ddcca1d23e69> in <module>
----> 1 ag.utils.tabular.metrics.balanced_accuracy(y_test, y_pred)
~/Desktop/github-aws/autogluon-public/autogluon/utils/tabular/metrics/__init__.py in __call__(self, y_true, y_pred, sample_weight)
87 else:
88 return self._sign * self._score_func(y_true, y_pred,
---> 89 **self._kwargs)
90
91
~/Desktop/github-aws/autogluon-public/autogluon/utils/tabular/metrics/classification_metrics.py in balanced_accuracy(solution, prediction)
19 elif y_type == 'multiclass':
20 # Need to create a multiclass solution and a multiclass predictions
---> 21 max_class = int(np.max((np.max(solution), np.max(prediction))))
22 solution_binary = np.zeros((len(solution), max_class + 1))
23 prediction_binary = np.zeros((len(prediction), max_class + 1))
<__array_function__ internals> in amax(*args, **kwargs)
~/Desktop/github-aws/ghaws/lib/python3.7/site-packages/numpy/core/fromnumeric.py in amax(a, axis, out, keepdims, initial, where)
2666 """
2667 return _wrapreduction(a, np.maximum, 'max', axis, None, out,
-> 2668 keepdims=keepdims, initial=initial, where=where)
2669
2670
~/Desktop/github-aws/ghaws/lib/python3.7/site-packages/numpy/core/fromnumeric.py in _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs)
88 return reduction(axis=axis, out=out, **passkwargs)
89
---> 90 return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
91
92
TypeError: cannot perform reduce with flexible type
|
TypeError
|
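The before_merge metric called `np.max` on the raw labels, which raises `TypeError: cannot perform reduce with flexible type` for string-typed labels on NumPy builds of that era; the fixed version one-hot encodes via `np.unique(..., return_inverse=True)`, which never compares label magnitudes. A small sketch of that encoding step on string labels:

```python
import numpy as np

# Dtype-agnostic one-hot encoding via np.unique, as in the fixed metric.
solution = np.array(["cat", "dog", "cat", "bird"])
prediction = np.array(["cat", "cat", "dog", "bird"])

unique_sol, encoded_sol = np.unique(solution, return_inverse=True)
unique_pred, _ = np.unique(prediction, return_inverse=True)
classes = np.unique(np.concatenate((unique_sol, unique_pred)))
map_sol = np.array([np.where(classes == c)[0][0] for c in unique_sol])

n = len(solution)
sol_ohe = np.zeros((n, len(classes)))
sol_ohe[np.arange(n), map_sol[encoded_sol]] = 1
print(sol_ohe)  # rows one-hot over classes ['bird', 'cat', 'dog']
```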
def generate_csv(inds, path):
with open(path, "w") as csvFile:
row = ["id", "category"]
writer = csv.writer(csvFile)
writer.writerow(row)
id = 1
for ind in inds:
row = [id, ind]
writer = csv.writer(csvFile)
writer.writerow(row)
id += 1
csvFile.close()
|
def generate_csv(inds, path):
with open(path, "w") as csvFile:
row = ["id", "category"]
writer = csv.writer(csvFile)
writer.writerow(row)
id = 1
for ind in inds:
row = [id, ind.asscalar()]
writer = csv.writer(csvFile)
writer.writerow(row)
id += 1
csvFile.close()
|
https://github.com/awslabs/autogluon/issues/357
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-9-ef60c46c4aa0> in <module>
1 import autogluon as ag
----> 2 ag.utils.generate_csv(inds, 'submission_autogluon.csv')
/opt/conda/lib/python3.6/site-packages/autogluon/utils/file_helper.py in generate_csv(inds, path)
147 id = 1
148 for ind in inds:
--> 149 row = [id, ind.asscalar()]
150 writer = csv.writer(csvFile)
151 writer.writerow(row)
AttributeError: 'numpy.int64' object has no attribute 'asscalar'
|
AttributeError
|
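`asscalar()` is an MXNet `NDArray` method, so the before_merge helper crashed as soon as `inds` held NumPy scalars. The merged fix simply writes `ind` directly; a defensive variant (a sketch, not the merged code) would accept either type:

```python
import numpy as np

# Normalize either an MXNet NDArray element or a NumPy scalar before
# writing, instead of assuming .asscalar() exists on every element.
def to_scalar(x):
    if hasattr(x, "asscalar"):   # mxnet.ndarray.NDArray
        return x.asscalar()
    return np.asarray(x).item()  # NumPy scalars and plain Python ints

print(to_scalar(np.int64(3)))    # 3
```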
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
pretrained = self.pretrained
if input_node.shape[3] not in [1, 3]:
if self.pretrained:
raise ValueError(
"When pretrained is set to True, expect input to "
"have 1 or 3 channels, bug got "
"{channels}.".format(channels=input_node.shape[3])
)
pretrained = False
if pretrained is None:
pretrained = hp.Boolean(PRETRAINED, default=False)
if pretrained:
with hp.conditional_scope(PRETRAINED, [True]):
trainable = hp.Boolean("trainable", default=False)
elif pretrained:
trainable = hp.Boolean("trainable", default=False)
if len(self.models) > 1:
version = hp.Choice("version", list(self.models.keys()))
else:
version = list(self.models.keys())[0]
min_size = self.min_size
if hp.Boolean("imagenet_size", default=False):
min_size = 224
if input_node.shape[1] < min_size or input_node.shape[2] < min_size:
input_node = layers.experimental.preprocessing.Resizing(
max(min_size, input_node.shape[1]),
max(min_size, input_node.shape[2]),
)(input_node)
if input_node.shape[3] == 1:
input_node = layers.Concatenate()([input_node] * 3)
if input_node.shape[3] != 3:
input_node = layers.Conv2D(filters=3, kernel_size=1, padding="same")(input_node)
if pretrained:
model = self.models[version](weights="imagenet", include_top=False)
model.trainable = trainable
else:
model = self.models[version](
weights=None, include_top=False, input_shape=input_node.shape[1:]
)
return model(input_node)
|
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
pretrained = self.pretrained
if input_node.shape[3] not in [1, 3]:
if self.pretrained:
raise ValueError(
"When pretrained is set to True, expect input to "
"have 1 or 3 channels, bug got "
"{channels}.".format(channels=input_node.shape[3])
)
pretrained = False
if pretrained is None:
pretrained = hp.Boolean("pretrained", default=False)
if len(self.models) > 1:
version = hp.Choice("version", list(self.models.keys()))
else:
version = list(self.models.keys())[0]
min_size = self.min_size
if hp.Boolean("imagenet_size", default=False):
min_size = 224
if input_node.shape[1] < min_size or input_node.shape[2] < min_size:
input_node = layers.experimental.preprocessing.Resizing(
max(min_size, input_node.shape[1]),
max(min_size, input_node.shape[2]),
)(input_node)
if input_node.shape[3] == 1:
input_node = layers.Concatenate()([input_node] * 3)
if input_node.shape[3] != 3:
input_node = layers.Conv2D(filters=3, kernel_size=1, padding="same")(input_node)
if pretrained:
model = self.models[version](weights="imagenet", include_top=False)
model.trainable = hp.Boolean("trainable", default=False)
else:
model = self.models[version](
weights=None, include_top=False, input_shape=input_node.shape[1:]
)
return model(input_node)
|
https://github.com/keras-team/autokeras/issues/1299
|
Traceback (most recent call last):
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 104, in build
model = self.hypermodel.build(hp)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\graph.py", line 263, in build
outputs = block.build(hp, inputs=temp_inputs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\engine\block.py", line 48, in _build_wrapper
return super()._build_wrapper(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\wrapper.py", line 102, in build
output_node = self._build_block(hp, output_node, block_type)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\wrapper.py", line 77, in _build_block
return basic.XceptionBlock().build(hp, output_node)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\engine\block.py", line 48, in _build_wrapper
return super()._build_wrapper(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\basic.py", line 574, in build
model.trainable = hp.Boolean("trainable", default=False)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hyperparameters.py", line 814, in Boolean
return self._retrieve(hp)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hyperparameters.py", line 625, in _retrieve
return self.values[hp.name]
KeyError: 'image_block_1/xception_block_1/trainable'
|
KeyError
|
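The `KeyError: 'image_block_1/xception_block_1/trainable'` happens when a hyperparameter is looked up that was never registered for the current configuration. The fix registers `trainable` inside `hp.conditional_scope`, tying it to `pretrained=True` so tuners that replay stored values know when it is inactive. A minimal sketch, assuming the kerastuner `HyperParameters` API of that era:

```python
import kerastuner

hp = kerastuner.HyperParameters()
pretrained = hp.Boolean("pretrained", default=True)
if pretrained:
    # The child hp is only defined (and only looked up) when its parent
    # condition holds, so replaying stored values cannot hit a missing key.
    with hp.conditional_scope("pretrained", [True]):
        trainable = hp.Boolean("trainable", default=False)
print(hp.values)  # {'pretrained': True, 'trainable': False}
```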
def _get_best_hps(self):
best_trials = self.get_best_trials()
if best_trials:
return best_trials[0].hyperparameters.copy()
else:
return self.hyperparameters.copy()
|
def _get_best_hps(self):
best_trials = self.get_best_trials()
if best_trials:
return best_trials[0].hyperparameters
else:
return self.hyperparameters
|
https://github.com/keras-team/autokeras/issues/1299
|
Traceback (most recent call last):
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 104, in build
model = self.hypermodel.build(hp)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\graph.py", line 263, in build
outputs = block.build(hp, inputs=temp_inputs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\engine\block.py", line 48, in _build_wrapper
return super()._build_wrapper(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\wrapper.py", line 102, in build
output_node = self._build_block(hp, output_node, block_type)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\wrapper.py", line 77, in _build_block
return basic.XceptionBlock().build(hp, output_node)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\engine\block.py", line 48, in _build_wrapper
return super()._build_wrapper(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\basic.py", line 574, in build
model.trainable = hp.Boolean("trainable", default=False)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hyperparameters.py", line 814, in Boolean
return self._retrieve(hp)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hyperparameters.py", line 625, in _retrieve
return self.values[hp.name]
KeyError: 'image_block_1/xception_block_1/trainable'
|
KeyError
|
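Returning `best_trials[0].hyperparameters` hands the caller a live reference into the oracle's recorded trial; mutating the result (as `_generate_hp_values` does in the next row) then corrupts the stored best trial. The `.copy()` breaks that aliasing. A plain-Python illustration, with dicts standing in for `HyperParameters`:

```python
# Dicts stand in for HyperParameters; the bug is plain aliasing.
trial_hps = {"learning_rate": 1e-3}

aliased = trial_hps            # before: shared reference
aliased["learning_rate"] = 1.0
print(trial_hps)               # {'learning_rate': 1.0} -- best trial corrupted

trial_hps = {"learning_rate": 1e-3}
copied = dict(trial_hps)       # after: .copy() isolates the caller
copied["learning_rate"] = 1.0
print(trial_hps)               # {'learning_rate': 0.001} -- intact
```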
def _generate_hp_values(self, hp_names):
best_hps = self._get_best_hps()
collisions = 0
while True:
hps = kerastuner.HyperParameters()
# Generate a set of random values.
for hp in self.hyperparameters.space:
hps.merge([hp])
# if not active, do nothing.
# if active, check if selected to be changed.
if hps.is_active(hp):
# if was active and not selected, do nothing.
if best_hps.is_active(hp.name) and hp.name not in hp_names:
hps.values[hp.name] = best_hps.values[hp.name]
continue
# if was not active or selected, sample.
hps.values[hp.name] = hp.random_sample(self._seed_state)
self._seed_state += 1
values = hps.values
# Keep trying until the set of values is unique,
# or until we exit due to too many collisions.
values_hash = self._compute_values_hash(values)
if values_hash in self._tried_so_far:
collisions += 1
if collisions <= self._max_collisions:
continue
return None
self._tried_so_far.add(values_hash)
break
return values
|
def _generate_hp_values(self, hp_names):
best_hps = self._get_best_hps()
collisions = 0
while True:
hps = kerastuner.HyperParameters()
# Generate a set of random values.
for hp in best_hps.space:
hps.merge([hp])
# if not active, do nothing.
# if active, check if selected to be changed.
if hps.is_active(hp):
# if was active and not selected, do nothing.
if best_hps.is_active(hp.name) and hp.name not in hp_names:
continue
# if was not active or selected, sample.
hps.values[hp.name] = hp.random_sample(self._seed_state)
self._seed_state += 1
values = hps.values
# Keep trying until the set of values is unique,
# or until we exit due to too many collisions.
values_hash = self._compute_values_hash(values)
if values_hash in self._tried_so_far:
collisions += 1
if collisions <= self._max_collisions:
continue
return None
self._tried_so_far.add(values_hash)
break
return values
|
https://github.com/keras-team/autokeras/issues/1299
|
Traceback (most recent call last):
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 104, in build
model = self.hypermodel.build(hp)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\graph.py", line 263, in build
outputs = block.build(hp, inputs=temp_inputs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\engine\block.py", line 48, in _build_wrapper
return super()._build_wrapper(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\wrapper.py", line 102, in build
output_node = self._build_block(hp, output_node, block_type)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\wrapper.py", line 77, in _build_block
return basic.XceptionBlock().build(hp, output_node)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\engine\block.py", line 48, in _build_wrapper
return super()._build_wrapper(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hypermodel.py", line 64, in _build_wrapper
return self._build(hp, *args, **kwargs)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\autokeras\blocks\basic.py", line 574, in build
model.trainable = hp.Boolean("trainable", default=False)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hyperparameters.py", line 814, in Boolean
return self._retrieve(hp)
File "P:\ProgramFiles\anaconda\envs\python\test\lib\site-packages\kerastuner\engine\hyperparameters.py", line 625, in _retrieve
return self.values[hp.name]
KeyError: 'image_block_1/xception_block_1/trainable'
|
KeyError
|
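Two changes matter in this row: the loop now walks the full declared search space (`self.hyperparameters.space`) rather than only the hyperparameters recorded in the best trial, and values that are active but not selected for mutation are explicitly copied from the best trial instead of being left undefined. A dict-based sketch of that inherit-or-resample loop (names illustrative):

```python
import random

# Dicts stand in for the HyperParameters search space and best-trial values.
space = {"lr": [1e-4, 1e-3, 1e-2], "units": [32, 64, 128]}
best_values = {"lr": 1e-3, "units": 64}

def generate_values(names_to_mutate):
    values = {}
    for name, choices in space.items():            # full space, not best-trial space
        if name in best_values and name not in names_to_mutate:
            values[name] = best_values[name]       # inherit from the best trial
        else:
            values[name] = random.choice(choices)  # resample the selected hps
    return values

print(generate_values({"units"}))  # lr inherited, units resampled
```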
def __init__(self, loss=None, metrics=None, output_shape=None, **kwargs):
super().__init__(**kwargs)
self.output_shape = output_shape
self.loss = tf.keras.losses.get(loss)
if metrics is None:
metrics = []
self.metrics = [tf.keras.metrics.get(metric) for metric in metrics]
|
def __init__(self, loss=None, metrics=None, output_shape=None, **kwargs):
super().__init__(**kwargs)
self.output_shape = output_shape
self.loss = loss
self.metrics = metrics
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
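The fix normalizes user input at construction time: `tf.keras.losses.get` and `tf.keras.metrics.get` accept a string identifier or a callable and return a callable either way, so a custom metric function no longer leaks through as a raw Python object that later serialization steps choke on. A short sketch (`custom_metric` is illustrative):

```python
import tensorflow as tf

def custom_metric(y_true, y_pred):            # a plain function, as in the issue
    return tf.reduce_mean(tf.abs(y_true - y_pred))

# .get() resolves a string identifier or passes a callable through unchanged,
# so downstream code never has to branch on the input type.
loss = tf.keras.losses.get("mse")
metrics = [tf.keras.metrics.get(m) for m in ["mae", custom_metric]]
```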
def get_config(self):
config = super().get_config()
config.update(
{
"loss": tf.keras.losses.serialize(self.loss),
"metrics": [tf.keras.metrics.serialize(metric) for metric in self.metrics],
"output_shape": self.output_shape,
}
)
return config
|
def get_config(self):
config = super().get_config()
config.update(
{"loss": self.loss, "metrics": self.metrics, "output_shape": self.output_shape}
)
return config
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
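This is the other half of the same fix: `get_config` must return JSON-safe values, and `json.dumps` on a bare function raises exactly the `TypeError: Object of type function is not JSON serializable` in the traceback. A sketch of the round-trip (the exact serialized form of a custom function varies across TF versions):

```python
import json
import tensorflow as tf

def custom_metric(y_true, y_pred):
    return tf.reduce_mean(tf.abs(y_true - y_pred))

# json.dumps(custom_metric) would raise:
#   TypeError: Object of type function is not JSON serializable
serialized = tf.keras.metrics.serialize(custom_metric)  # name/config, not a function
print(json.dumps({"metrics": [serialized]}))            # JSON-safe
```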
def __init__(self, preprocessors=None, **kwargs):
super().__init__(**kwargs)
self.preprocessors = nest.flatten(preprocessors)
self._finished = False
# Save or load the HyperModel.
self.hypermodel.hypermodel.save(os.path.join(self.project_dir, "graph"))
|
def __init__(self, preprocessors=None, **kwargs):
super().__init__(**kwargs)
self.preprocessors = nest.flatten(preprocessors)
self._finished = False
# Save or load the HyperModel.
utils.save_json(
os.path.join(self.project_dir, "graph"),
graph_module.serialize(self.hypermodel.hypermodel),
)
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
def feature_encoding_input(block):
"""Fetch the column_types and column_names.
The values are fetched for FeatureEncoding from StructuredDataInput.
"""
if not isinstance(block.inputs[0], nodes_module.StructuredDataInput):
raise TypeError(
"FeatureEncoding block can only be used with StructuredDataInput."
)
block.column_types = block.inputs[0].column_types
block.column_names = block.inputs[0].column_names
|
def feature_encoding_input(block):
"""Fetch the column_types and column_names.
The values are fetched for FeatureEncoding from StructuredDataInput.
"""
if not isinstance(block.inputs[0], nodes.StructuredDataInput):
raise TypeError(
"FeatureEncoding block can only be used with StructuredDataInput."
)
block.column_types = block.inputs[0].column_types
block.column_names = block.inputs[0].column_names
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
def get_config(self):
blocks = [hypermodels.serialize(block) for block in self.blocks]
nodes = {
str(self._node_to_id[node]): nodes_module.serialize(node)
for node in self.inputs
}
override_hps = [
kerastuner.engine.hyperparameters.serialize(hp) for hp in self.override_hps
]
block_inputs = {
str(block_id): [self._node_to_id[node] for node in block.inputs]
for block_id, block in enumerate(self.blocks)
}
block_outputs = {
str(block_id): [self._node_to_id[node] for node in block.outputs]
for block_id, block in enumerate(self.blocks)
}
outputs = [self._node_to_id[node] for node in self.outputs]
return {
"override_hps": override_hps, # List [serialized].
"blocks": blocks, # Dict {id: serialized}.
"nodes": nodes, # Dict {id: serialized}.
"outputs": outputs, # List of node_ids.
"block_inputs": block_inputs, # Dict {id: List of node_ids}.
"block_outputs": block_outputs, # Dict {id: List of node_ids}.
}
|
def get_config(self):
blocks = [serialize(block) for block in self.blocks]
nodes = {str(self._node_to_id[node]): serialize(node) for node in self.inputs}
override_hps = [
tf.keras.utils.serialize_keras_object(hp) for hp in self.override_hps
]
block_inputs = {
str(block_id): [self._node_to_id[node] for node in block.inputs]
for block_id, block in enumerate(self.blocks)
}
block_outputs = {
str(block_id): [self._node_to_id[node] for node in block.outputs]
for block_id, block in enumerate(self.blocks)
}
outputs = [self._node_to_id[node] for node in self.outputs]
return {
"override_hps": override_hps, # List [serialized].
"blocks": blocks, # Dict {id: serialized}.
"nodes": nodes, # Dict {id: serialized}.
"outputs": outputs, # List of node_ids.
"block_inputs": block_inputs, # Dict {id: List of node_ids}.
"block_outputs": block_outputs, # Dict {id: List of node_ids}.
}
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
def from_config(cls, config):
blocks = [hypermodels.deserialize(block) for block in config["blocks"]]
nodes = {
int(node_id): nodes_module.deserialize(node)
for node_id, node in config["nodes"].items()
}
override_hps = [
kerastuner.engine.hyperparameters.deserialize(config)
for config in config["override_hps"]
]
inputs = [nodes[node_id] for node_id in nodes]
for block_id, block in enumerate(blocks):
input_nodes = [
nodes[node_id] for node_id in config["block_inputs"][str(block_id)]
]
output_nodes = nest.flatten(block(input_nodes))
for output_node, node_id in zip(
output_nodes, config["block_outputs"][str(block_id)]
):
nodes[node_id] = output_node
outputs = [nodes[node_id] for node_id in config["outputs"]]
return cls(inputs=inputs, outputs=outputs, override_hps=override_hps)
|
def from_config(cls, config):
blocks = [deserialize(block) for block in config["blocks"]]
nodes = {
int(node_id): deserialize(node) for node_id, node in config["nodes"].items()
}
override_hps = [
kerastuner.engine.hyperparameters.deserialize(config)
for config in config["override_hps"]
]
inputs = [nodes[node_id] for node_id in nodes]
for block_id, block in enumerate(blocks):
input_nodes = [
nodes[node_id] for node_id in config["block_inputs"][str(block_id)]
]
output_nodes = nest.flatten(block(input_nodes))
for output_node, node_id in zip(
output_nodes, config["block_outputs"][str(block_id)]
):
nodes[node_id] = output_node
outputs = [nodes[node_id] for node_id in config["outputs"]]
return cls(inputs=inputs, outputs=outputs, override_hps=override_hps)
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
def __init__(
self,
num_classes: Optional[int] = None,
multi_label: bool = False,
loss: Optional[types.LossType] = None,
metrics: Optional[types.MetricsType] = None,
dropout_rate: Optional[float] = None,
**kwargs,
):
self.num_classes = num_classes
self.multi_label = multi_label
self.dropout_rate = dropout_rate
if metrics is None:
metrics = ["accuracy"]
if loss is None:
loss = self.infer_loss()
super().__init__(loss=loss, metrics=metrics, **kwargs)
|
def __init__(
self,
num_classes: Optional[int] = None,
multi_label: bool = False,
loss: Optional[types.LossType] = None,
metrics: Optional[types.MetricsType] = None,
dropout_rate: Optional[float] = None,
**kwargs,
):
super().__init__(loss=loss, metrics=metrics, **kwargs)
self.num_classes = num_classes
self.multi_label = multi_label
if not self.metrics:
self.metrics = ["accuracy"]
self.dropout_rate = dropout_rate
self.set_loss()
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
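The fix above resolves default metrics and loss before delegating to the base class, rather than patching self.metrics after super().__init__. A minimal sketch of that pattern, with illustrative class names rather than AutoKeras's actual classes:

from typing import List, Optional

class Head:
    def __init__(self, loss: Optional[str] = None, metrics: Optional[List[str]] = None):
        self.loss = loss
        self.metrics = metrics

class ClassificationHead(Head):
    def __init__(self, loss: Optional[str] = None, metrics: Optional[List[str]] = None, **kwargs):
        # Resolve defaults up front so the base class only ever sees
        # fully determined arguments.
        if metrics is None:
            metrics = ["accuracy"]
        if loss is None:
            loss = "categorical_crossentropy"  # stand-in for self.infer_loss()
        super().__init__(loss=loss, metrics=metrics, **kwargs)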
def config_from_adapter(self, adapter):
super().config_from_adapter(adapter)
self.num_classes = adapter.num_classes
self.loss = self.infer_loss()
|
def config_from_adapter(self, adapter):
super().config_from_adapter(adapter)
self.num_classes = adapter.num_classes
self.set_loss()
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
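Here config_from_adapter recomputes the loss via infer_loss once the adapter knows the class count, instead of going through a stateful set_loss. A sketch of the usual inference rule; this mirrors common Keras conventions rather than AutoKeras's exact implementation:

def infer_loss(num_classes: int, multi_label: bool = False) -> str:
    # Binary or multi-label targets pair with binary crossentropy;
    # mutually exclusive multi-class targets with categorical crossentropy.
    if num_classes == 2 or multi_label:
        return "binary_crossentropy"
    return "categorical_crossentropy"

assert infer_loss(2) == "binary_crossentropy"
assert infer_loss(10) == "categorical_crossentropy"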
def __init__(
self,
output_dim: Optional[int] = None,
loss: types.LossType = "mean_squared_error",
metrics: Optional[types.MetricsType] = None,
dropout_rate: Optional[float] = None,
**kwargs,
):
if metrics is None:
metrics = ["mean_squared_error"]
super().__init__(loss=loss, metrics=metrics, **kwargs)
self.output_dim = output_dim
self.dropout_rate = dropout_rate
|
def __init__(
self,
output_dim: Optional[int] = None,
loss: types.LossType = "mean_squared_error",
metrics: Optional[types.MetricsType] = None,
dropout_rate: Optional[float] = None,
**kwargs,
):
super().__init__(loss=loss, metrics=metrics, **kwargs)
self.output_dim = output_dim
if not self.metrics:
self.metrics = ["mean_squared_error"]
self.loss = loss
self.dropout_rate = dropout_rate
|
https://github.com/keras-team/autokeras/issues/1057
|
Traceback (most recent call last):
File "mnist_model.py", line 12, in <module>
clf = ak.ImageClassifier(metrics=['accuracy', custom_metric], max_trials=3)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 67, in __init__
seed=seed)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tasks/image.py", line 19, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/auto_model.py", line 125, in __init__
project_name=name)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/task_specific.py", line 66, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/tuners/greedy.py", line 197, in __init__
**kwargs)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/engine/tuner.py", line 35, in __init__
graph_module.serialize(self.hypermodel.hypermodel))
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/site-packages/autokeras/utils.py", line 179, in save_json
obj = json.dumps(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/lhs18285/miniconda3/envs/nas/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
|
TypeError
|
def fit_before_convert(self, dataset):
# If in tf.data.Dataset, must be encoded already.
if isinstance(dataset, tf.data.Dataset):
if not self.num_classes:
shape = utils.dataset_shape(dataset)[0]
# Single column with 0s and 1s.
if shape == 1:
self.num_classes = 2
else:
self.num_classes = shape
return
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
if isinstance(dataset, pd.Series):
dataset = dataset.values.reshape(-1, 1)
# Not label.
if len(dataset.flatten()) != len(dataset):
self.num_classes = dataset.shape[1]
return
labels = set(dataset.flatten())
if self.num_classes is None:
self.num_classes = len(labels)
if self.num_classes == 2:
self.label_encoder = encoders.LabelEncoder()
elif self.num_classes > 2:
self.label_encoder = encoders.OneHotEncoder()
elif self.num_classes < 2:
raise ValueError(
"Expect the target data for {name} to have "
"at least 2 classes, but got {num_classes}.".format(
name=self.name, num_classes=self.num_classes
)
)
self.label_encoder.fit(dataset)
|
def fit_before_convert(self, dataset):
# If in tf.data.Dataset, must be encoded already.
if isinstance(dataset, tf.data.Dataset):
if not self.num_classes:
shape = dataset.take(1).shape[1]
if shape == 1:
self.num_classes = 2
else:
self.num_classes = shape
return
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
if isinstance(dataset, pd.Series):
dataset = dataset.values.reshape(-1, 1)
# Not label.
if len(dataset.flatten()) != len(dataset):
self.num_classes = dataset.shape[1]
return
labels = set(dataset.flatten())
if self.num_classes is None:
self.num_classes = len(labels)
if self.num_classes == 2:
self.label_encoder = encoders.LabelEncoder()
elif self.num_classes > 2:
self.label_encoder = encoders.OneHotEncoder()
elif self.num_classes < 2:
raise ValueError(
"Expect the target data for {name} to have "
"at least 2 classes, but got {num_classes}.".format(
name=self.name, num_classes=self.num_classes
)
)
self.label_encoder.fit(dataset)
|
https://github.com/keras-team/autokeras/issues/940
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-a90e48684d55> in <module>
----> 1 ak0.fit(mnist_train, epochs=10)
~/anaconda3/lib/python3.7/site-packages/autokeras/tasks/image.py in fit(self, x, y, epochs, callbacks, validation_split, validation_data, **kwargs)
119 validation_split=validation_split,
120 validation_data=validation_data,
--> 121 **kwargs)
122
123
~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in fit(self, x, y, batch_size, epochs, callbacks, validation_split, validation_data, **kwargs)
229 y=y,
230 validation_data=validation_data,
--> 231 validation_split=validation_split)
232
233 # Process the args.
~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _prepare_data(self, x, y, validation_data, validation_split)
303 # TODO: Handle other types of input, zip dataset, tensor, dict.
304 # Prepare the dataset.
--> 305 dataset = self._process_xy(x, y, True)
306 if validation_data:
307 self._split_dataset = False
~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _process_xy(self, x, y, fit)
291
292 x = self._process_x(x, fit)
--> 293 y = self._process_y(y, fit)
294
295 return tf.data.Dataset.zip((x, y))
~/anaconda3/lib/python3.7/site-packages/autokeras/auto_model.py in _process_y(self, y, fit)
267 for data, head, adapter in zip(y, self._heads, self._output_adapters):
268 if fit:
--> 269 data = adapter.fit_transform(data)
270 else:
271 data = adapter.transform(data)
~/anaconda3/lib/python3.7/site-packages/autokeras/engine/adapter.py in fit_transform(self, dataset)
66 def fit_transform(self, dataset):
67 self.check(dataset)
---> 68 self.fit_before_convert(dataset)
69 dataset = self.convert_to_dataset(dataset)
70 self.fit(dataset)
~/anaconda3/lib/python3.7/site-packages/autokeras/adapters/output_adapter.py in fit_before_convert(self, dataset)
65 if isinstance(dataset, tf.data.Dataset):
66 if not self.num_classes:
---> 67 shape = dataset.take(1).shape[1]
68 if shape == 1:
69 self.num_classes = 2
AttributeError: 'TakeDataset' object has no attribute 'shape'
|
AttributeError
|
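The failing line used dataset.take(1).shape, but a tf.data.Dataset (including TakeDataset) has no .shape attribute; the fix routes through a utils.dataset_shape helper instead. With plain TensorFlow, the same information is available from element_spec:

import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices(tf.zeros((8, 3)))
# Datasets carry per-element type/shape metadata in element_spec.
print(ds.element_spec.shape)  # (3,)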
def __init__(self, data):
super().__init__()
self.mean = np.mean(data, axis=0)
self.std = np.std(data, axis=0)
|
def __init__(self, data):
super().__init__()
self.max_val = data.max()
data = data / self.max_val
self.mean = np.mean(data, axis=0, keepdims=True).flatten()
self.std = np.std(data, axis=0, keepdims=True).flatten()
|
https://github.com/keras-team/autokeras/issues/385
|
Using TensorFlow backend.
Saving Directory: /tmp/autokeras_ASE3DJ
Initializing search.
Initialization finished.
+----------------------------------------------+
| Training model 0 |
+----------------------------------------------+
Using TensorFlow backend.
Epoch-1, Current Metric - 0: 0%| | 0/469 [00:00<?, ? batch/s]Process SpawnProcess-1:
Traceback (most recent call last):
File "/home/boyuan/anaconda3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/home/boyuan/anaconda3/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/home/boyuan/workspace/lab/autokeras/autokeras/search.py", line 276, in train
verbose=verbose).train_model(**trainer_args)
File "/home/boyuan/workspace/lab/autokeras/autokeras/nn/model_trainer.py", line 126, in train_model
self._train()
File "/home/boyuan/workspace/lab/autokeras/autokeras/nn/model_trainer.py", line 157, in _train
for batch_idx, (inputs, targets) in enumerate(deepcopy(loader)):
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 314, in __next__
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 314, in <listcomp>
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/home/boyuan/workspace/lab/autokeras/autokeras/preprocessor.py", line 302, in __getitem__
return self.compose(feature), self.target[index]
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 49, in __call__
img = t(img)
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 143, in __call__
return F.normalize(tensor, self.mean, self.std)
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/functional.py", line 165, in normalize
raise TypeError('tensor is not a torch image.')
TypeError: tensor is not a torch image.
|
TypeError
|
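After this change the normalization statistics are plain per-feature numpy arrays, and (as transform_train below shows) the scaling is applied directly in numpy rather than through torchvision. A minimal sketch of that numpy path:

import numpy as np

data = np.random.rand(100, 28, 28, 3)
mean = np.mean(data, axis=0)
std = np.std(data, axis=0)
# nan_to_num guards against zero-variance features producing NaNs.
normalized = np.nan_to_num((data - mean) / std)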
def transform_train(self, data, targets=None, batch_size=None):
data = (data - self.mean) / self.std
data = np.nan_to_num(data)
dataset = self._transform([], data, targets)
if batch_size is None:
batch_size = Constant.MAX_BATCH_SIZE
batch_size = min(len(data), batch_size)
return DataLoader(dataset, batch_size=batch_size, shuffle=True)
|
def transform_train(self, data, targets=None, batch_size=None):
dataset = self._transform(
[Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))], data, targets
)
if batch_size is None:
batch_size = Constant.MAX_BATCH_SIZE
batch_size = min(len(data), batch_size)
return DataLoader(dataset, batch_size=batch_size, shuffle=True)
|
https://github.com/keras-team/autokeras/issues/385
|
Using TensorFlow backend.
Saving Directory: /tmp/autokeras_ASE3DJ
Initializing search.
Initialization finished.
+----------------------------------------------+
| Training model 0 |
+----------------------------------------------+
Using TensorFlow backend.
Epoch-1, Current Metric - 0: 0%| | 0/469 [00:00<?, ? batch/s]Process SpawnProcess-1:
Traceback (most recent call last):
File "/home/boyuan/anaconda3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/home/boyuan/anaconda3/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/home/boyuan/workspace/lab/autokeras/autokeras/search.py", line 276, in train
verbose=verbose).train_model(**trainer_args)
File "/home/boyuan/workspace/lab/autokeras/autokeras/nn/model_trainer.py", line 126, in train_model
self._train()
File "/home/boyuan/workspace/lab/autokeras/autokeras/nn/model_trainer.py", line 157, in _train
for batch_idx, (inputs, targets) in enumerate(deepcopy(loader)):
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 314, in __next__
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 314, in <listcomp>
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/home/boyuan/workspace/lab/autokeras/autokeras/preprocessor.py", line 302, in __getitem__
return self.compose(feature), self.target[index]
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 49, in __call__
img = t(img)
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 143, in __call__
return F.normalize(tensor, self.mean, self.std)
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/functional.py", line 165, in normalize
raise TypeError('tensor is not a torch image.')
TypeError: tensor is not a torch image.
|
TypeError
|
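For context on why the old torchvision path failed: transforms.Normalize, at least in the torchvision release shown in the traceback, validates its input with an is-this-a-torch-image check and expects a C x H x W torch.Tensor. A hedged sketch:

import torch
from torchvision import transforms

norm = transforms.Normalize(mean=[0.5], std=[0.5])
img = torch.rand(1, 28, 28)  # C x H x W, as Normalize expects
out = norm(img)              # fine
# In the torchvision of that era, anything failing the image check raised
# "TypeError: tensor is not a torch image."; newer releases are more
# permissive and word the error differently.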
def _transform(self, compose_list, data, targets):
args = [0, len(data.shape) - 1] + list(range(1, len(data.shape) - 1))
data = torch.Tensor(data.transpose(*args))
data_transforms = Compose(compose_list)
return MultiTransformDataset(data, targets, data_transforms)
|
def _transform(self, compose_list, data, targets):
data = data / self.max_val
args = [0, len(data.shape) - 1] + list(range(1, len(data.shape) - 1))
data = torch.Tensor(data.transpose(*args))
data_transforms = Compose(compose_list)
return MultiTransformDataset(data, targets, data_transforms)
|
https://github.com/keras-team/autokeras/issues/385
|
Using TensorFlow backend.
Saving Directory: /tmp/autokeras_ASE3DJ
Initializing search.
Initialization finished.
+----------------------------------------------+
| Training model 0 |
+----------------------------------------------+
Using TensorFlow backend.
Epoch-1, Current Metric - 0: 0%| | 0/469 [00:00<?, ? batch/s]Process SpawnProcess-1:
Traceback (most recent call last):
File "/home/boyuan/anaconda3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/home/boyuan/anaconda3/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/home/boyuan/workspace/lab/autokeras/autokeras/search.py", line 276, in train
verbose=verbose).train_model(**trainer_args)
File "/home/boyuan/workspace/lab/autokeras/autokeras/nn/model_trainer.py", line 126, in train_model
self._train()
File "/home/boyuan/workspace/lab/autokeras/autokeras/nn/model_trainer.py", line 157, in _train
for batch_idx, (inputs, targets) in enumerate(deepcopy(loader)):
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 314, in __next__
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 314, in <listcomp>
batch = self.collate_fn([self.dataset[i] for i in indices])
File "/home/boyuan/workspace/lab/autokeras/autokeras/preprocessor.py", line 302, in __getitem__
return self.compose(feature), self.target[index]
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 49, in __call__
img = t(img)
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/transforms.py", line 143, in __call__
return F.normalize(tensor, self.mean, self.std)
File "/home/boyuan/anaconda3/lib/python3.6/site-packages/torchvision/transforms/functional.py", line 165, in normalize
raise TypeError('tensor is not a torch image.')
TypeError: tensor is not a torch image.
|
TypeError
|
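The args gymnastics in _transform move the channel axis to position 1, converting numpy's channels-last layout into the channels-first layout PyTorch expects. Concretely:

import numpy as np

data = np.zeros((32, 28, 28, 3))   # N, H, W, C
args = [0, data.ndim - 1] + list(range(1, data.ndim - 1))
print(args)                         # [0, 3, 1, 2]
print(data.transpose(*args).shape)  # (32, 3, 28, 28)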
def __init__(
self, verbose=False, path=None, resume=False, searcher_args=None, augment=None
):
"""Initialize the instance.
The classifier will be loaded from the files in 'path' if parameter 'resume' is True.
Otherwise it would create a new one.
Args:
verbose: A boolean of whether the search process will be printed to stdout.
path: A string. The path to a directory, where the intermediate results are saved.
resume: A boolean. If True, the classifier will continue to previous work saved in path.
Otherwise, the classifier will start a new search.
        augment: A boolean value indicating whether the data needs augmentation. If not defined, it
            will use the value of Constant.DATA_AUGMENTATION, which is True by default.
"""
super().__init__(verbose)
if searcher_args is None:
searcher_args = {}
if path is None:
path = temp_folder_generator()
if augment is None:
augment = Constant.DATA_AUGMENTATION
self.path = path
if has_file(os.path.join(self.path, "classifier")) and resume:
classifier = pickle_from_file(os.path.join(self.path, "classifier"))
self.__dict__ = classifier.__dict__
else:
self.y_encoder = None
self.data_transformer = None
self.verbose = verbose
self.augment = augment
self.cnn = CnnModule(self.loss, self.metric, searcher_args, path, verbose)
self.resize_height = None
self.resize_width = None
|
def __init__(
self, verbose=False, path=None, resume=False, searcher_args=None, augment=None
):
"""Initialize the instance.
The classifier will be loaded from the files in 'path' if parameter 'resume' is True.
Otherwise it would create a new one.
Args:
verbose: A boolean of whether the search process will be printed to stdout.
path: A string. The path to a directory, where the intermediate results are saved.
resume: A boolean. If True, the classifier will continue to previous work saved in path.
Otherwise, the classifier will start a new search.
        augment: A boolean value indicating whether the data needs augmentation. If not defined, it
            will use the value of Constant.DATA_AUGMENTATION, which is True by default.
"""
super().__init__(verbose)
if searcher_args is None:
searcher_args = {}
if path is None:
path = temp_folder_generator()
if augment is None:
augment = Constant.DATA_AUGMENTATION
self.path = path
if has_file(os.path.join(self.path, "classifier")) and resume:
classifier = pickle_from_file(os.path.join(self.path, "classifier"))
self.__dict__ = classifier.__dict__
else:
self.y_encoder = None
self.data_transformer = None
self.verbose = verbose
self.augment = augment
self.cnn = CnnModule(self.loss, self.metric, searcher_args, path, verbose)
|
https://github.com/keras-team/autokeras/issues/193
|
╒==============================================╕
| Training model 1 |
╘==============================================╛
Using TensorFlow backend.
Current Epoch: 0%| | 0/1 [00:00<?, ? batch/s]Exception ignored in: <bound method tqdm.__del__ of Current Epoch: 0%| | 0/1 [07:59<?, ? batch/s]>
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 885, in __del__
self.close()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 1090, in close
self._decr_instances(self)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 454, in _decr_instances
cls.monitor.exit()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_monitor.py", line 52, in exit
self.join()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/threading.py", line 1053, in join
raise RuntimeError("cannot join current thread")
RuntimeError: cannot join current thread
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/search.py", line 296, in train
verbose=verbose).train_model(**trainer_args)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 103, in train_model
self._train()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 147, in _train
outputs = self.model(inputs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/graph.py", line 610, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py", line 66, in forward
exponential_average_factor, self.eps)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/functional.py", line 1254, in batch_norm
training, momentum, eps, torch.backends.cudnn.enabled
RuntimeError: $ Torch: not enough memory: you tried to allocate 0GB. Buy new RAM! at [/pytorch/aten/src/TH/THGeneral.cpp:204]
|
RuntimeError
|
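The new resize_height/resize_width attributes exist so that one resize target, computed from the training data, can be reused consistently across fit, evaluate, final_fit, and export. A hypothetical stand-in for compute_image_resize_params (whose real implementation is not shown in this record) that picks the median size:

import numpy as np

def compute_resize_params(images):
    # Hypothetical: choose the median height/width across images,
    # which may vary in size.
    heights = [img.shape[0] for img in images]
    widths = [img.shape[1] for img in images]
    return int(np.median(heights)), int(np.median(widths))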
def fit(self, x, y, x_test=None, y_test=None, time_limit=None):
x = np.array(x)
if len(x.shape) != 0 and len(x[0].shape) == 3:
self.resize_height, self.resize_width = compute_image_resize_params(x)
x = resize_image_data(x, self.resize_height, self.resize_width)
if x_test is not None:
x_test = resize_image_data(x_test, self.resize_height, self.resize_width)
y = np.array(y).flatten()
validate_xy(x, y)
y = self.transform_y(y)
if x_test is None or y_test is None:
# Divide training data into training and testing data.
validation_set_size = int(len(y) * Constant.VALIDATION_SET_SIZE)
validation_set_size = min(validation_set_size, 500)
validation_set_size = max(validation_set_size, 1)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=validation_set_size, random_state=42
)
else:
x_train = x
y_train = y
# Transform x_train
if self.data_transformer is None:
self.data_transformer = ImageDataTransformer(x, augment=self.augment)
# Wrap the data into DataLoaders
train_data = self.data_transformer.transform_train(x_train, y_train)
test_data = self.data_transformer.transform_test(x_test, y_test)
# Save the classifier
pickle_to_file(self, os.path.join(self.path, "classifier"))
if time_limit is None:
time_limit = 24 * 60 * 60
self.cnn.fit(
self.get_n_output_node(), x_train.shape, train_data, test_data, time_limit
)
|
def fit(self, x, y, x_test=None, y_test=None, time_limit=None):
x = np.array(x)
y = np.array(y).flatten()
validate_xy(x, y)
y = self.transform_y(y)
if x_test is None or y_test is None:
# Divide training data into training and testing data.
validation_set_size = int(len(y) * Constant.VALIDATION_SET_SIZE)
validation_set_size = min(validation_set_size, 500)
validation_set_size = max(validation_set_size, 1)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=validation_set_size, random_state=42
)
else:
x_train = x
y_train = y
# Transform x_train
if self.data_transformer is None:
self.data_transformer = ImageDataTransformer(x, augment=self.augment)
# Wrap the data into DataLoaders
train_data = self.data_transformer.transform_train(x_train, y_train)
test_data = self.data_transformer.transform_test(x_test, y_test)
# Save the classifier
pickle_to_file(self, os.path.join(self.path, "classifier"))
if time_limit is None:
time_limit = 24 * 60 * 60
self.cnn.fit(
self.get_n_output_node(), x_train.shape, train_data, test_data, time_limit
)
|
https://github.com/keras-team/autokeras/issues/193
|
╒==============================================╕
| Training model 1 |
╘==============================================╛
Using TensorFlow backend.
Current Epoch: 0%| | 0/1 [00:00<?, ? batch/s]Exception ignored in: <bound method tqdm.__del__ of Current Epoch: 0%| | 0/1 [07:59<?, ? batch/s]>
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 885, in __del__
self.close()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 1090, in close
self._decr_instances(self)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 454, in _decr_instances
cls.monitor.exit()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_monitor.py", line 52, in exit
self.join()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/threading.py", line 1053, in join
raise RuntimeError("cannot join current thread")
RuntimeError: cannot join current thread
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/search.py", line 296, in train
verbose=verbose).train_model(**trainer_args)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 103, in train_model
self._train()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 147, in _train
outputs = self.model(inputs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/graph.py", line 610, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py", line 66, in forward
exponential_average_factor, self.eps)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/functional.py", line 1254, in batch_norm
training, momentum, eps, torch.backends.cudnn.enabled
RuntimeError: $ Torch: not enough memory: you tried to allocate 0GB. Buy new RAM! at [/pytorch/aten/src/TH/THGeneral.cpp:204]
|
RuntimeError
|
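The point of the added lines in fit is that whatever resize target is derived from the training set must also be applied to the test set, or the network's fixed input shape will not match at evaluation time. A self-contained sketch with a hypothetical nearest-neighbour resizer standing in for resize_image_data:

import numpy as np

def resize_nn(batch, h, w):
    # Hypothetical nearest-neighbour resize; the real helper uses a
    # proper resampler.
    n, H, W, c = batch.shape
    rows = np.arange(h) * H // h
    cols = np.arange(w) * W // w
    return batch[:, rows][:, :, cols]

x_train = np.random.rand(4, 32, 32, 3)
x_test = np.random.rand(2, 48, 48, 3)
h, w = 28, 28  # one target, computed once from the training data
x_train, x_test = resize_nn(x_train, h, w), resize_nn(x_test, h, w)
print(x_train.shape, x_test.shape)  # (4, 28, 28, 3) (2, 28, 28, 3)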
def evaluate(self, x_test, y_test):
"""Return the accuracy score between predict value and `y_test`."""
if len(x_test.shape) != 0 and len(x_test[0].shape) == 3:
x_test = resize_image_data(x_test, self.resize_height, self.resize_width)
y_predict = self.predict(x_test)
return self.metric().evaluate(y_test, y_predict)
|
def evaluate(self, x_test, y_test):
"""Return the accuracy score between predict value and `y_test`."""
y_predict = self.predict(x_test)
return self.metric().evaluate(y_test, y_predict)
|
https://github.com/keras-team/autokeras/issues/193
|
╒==============================================╕
| Training model 1 |
╘==============================================╛
Using TensorFlow backend.
Current Epoch: 0%| | 0/1 [00:00<?, ? batch/s]Exception ignored in: <bound method tqdm.__del__ of Current Epoch: 0%| | 0/1 [07:59<?, ? batch/s]>
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 885, in __del__
self.close()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 1090, in close
self._decr_instances(self)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 454, in _decr_instances
cls.monitor.exit()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_monitor.py", line 52, in exit
self.join()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/threading.py", line 1053, in join
raise RuntimeError("cannot join current thread")
RuntimeError: cannot join current thread
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/search.py", line 296, in train
verbose=verbose).train_model(**trainer_args)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 103, in train_model
self._train()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 147, in _train
outputs = self.model(inputs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/graph.py", line 610, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py", line 66, in forward
exponential_average_factor, self.eps)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/functional.py", line 1254, in batch_norm
training, momentum, eps, torch.backends.cudnn.enabled
RuntimeError: $ Torch: not enough memory: you tried to allocate 0GB. Buy new RAM! at [/pytorch/aten/src/TH/THGeneral.cpp:204]
|
RuntimeError
|
def final_fit(self, x_train, y_train, x_test, y_test, trainer_args=None, retrain=False):
"""Final training after found the best architecture.
Args:
x_train: A numpy.ndarray of training data.
y_train: A numpy.ndarray of training targets.
x_test: A numpy.ndarray of testing data.
y_test: A numpy.ndarray of testing targets.
trainer_args: A dictionary containing the parameters of the ModelTrainer constructor.
        retrain: A boolean of whether to reinitialize the weights of the model.
"""
if trainer_args is None:
trainer_args = {"max_no_improvement_num": 30}
if len(x_train.shape) != 0 and len(x_train[0].shape) == 3:
x_train = resize_image_data(x_train, self.resize_height, self.resize_width)
if x_test is not None:
x_test = resize_image_data(x_test, self.resize_height, self.resize_width)
y_train = self.transform_y(y_train)
y_test = self.transform_y(y_test)
train_data = self.data_transformer.transform_train(x_train, y_train)
test_data = self.data_transformer.transform_test(x_test, y_test)
self.cnn.final_fit(train_data, test_data, trainer_args, retrain)
|
def final_fit(self, x_train, y_train, x_test, y_test, trainer_args=None, retrain=False):
"""Final training after found the best architecture.
Args:
x_train: A numpy.ndarray of training data.
y_train: A numpy.ndarray of training targets.
x_test: A numpy.ndarray of testing data.
y_test: A numpy.ndarray of testing targets.
trainer_args: A dictionary containing the parameters of the ModelTrainer constructor.
        retrain: A boolean of whether to reinitialize the weights of the model.
"""
if trainer_args is None:
trainer_args = {"max_no_improvement_num": 30}
y_train = self.transform_y(y_train)
y_test = self.transform_y(y_test)
train_data = self.data_transformer.transform_train(x_train, y_train)
test_data = self.data_transformer.transform_test(x_test, y_test)
self.cnn.final_fit(train_data, test_data, trainer_args, retrain)
|
https://github.com/keras-team/autokeras/issues/193
|
╒==============================================╕
| Training model 1 |
╘==============================================╛
Using TensorFlow backend.
Current Epoch: 0%| | 0/1 [00:00<?, ? batch/s]Exception ignored in: <bound method tqdm.__del__ of Current Epoch: 0%| | 0/1 [07:59<?, ? batch/s]>
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 885, in __del__
self.close()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 1090, in close
self._decr_instances(self)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 454, in _decr_instances
cls.monitor.exit()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_monitor.py", line 52, in exit
self.join()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/threading.py", line 1053, in join
raise RuntimeError("cannot join current thread")
RuntimeError: cannot join current thread
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/search.py", line 296, in train
verbose=verbose).train_model(**trainer_args)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 103, in train_model
self._train()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 147, in _train
outputs = self.model(inputs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/graph.py", line 610, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py", line 66, in forward
exponential_average_factor, self.eps)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/functional.py", line 1254, in batch_norm
training, momentum, eps, torch.backends.cudnn.enabled
RuntimeError: $ Torch: not enough memory: you tried to allocate 0GB. Buy new RAM! at [/pytorch/aten/src/TH/THGeneral.cpp:204]
|
RuntimeError
|
def export_autokeras_model(self, model_file_name):
"""Creates and Exports the AutoKeras model to the given filename."""
portable_model = PortableImageSupervised(
graph=self.cnn.best_model,
y_encoder=self.y_encoder,
data_transformer=self.data_transformer,
metric=self.metric,
inverse_transform_y_method=self.inverse_transform_y,
resize_params=(self.resize_height, self.resize_width),
)
pickle_to_file(portable_model, model_file_name)
|
def export_autokeras_model(self, model_file_name):
"""Creates and Exports the AutoKeras model to the given filename."""
portable_model = PortableImageSupervised(
graph=self.cnn.best_model,
y_encoder=self.y_encoder,
data_transformer=self.data_transformer,
metric=self.metric,
inverse_transform_y_method=self.inverse_transform_y,
)
pickle_to_file(portable_model, model_file_name)
|
https://github.com/keras-team/autokeras/issues/193
|
╒==============================================╕
| Training model 1 |
╘==============================================╛
Using TensorFlow backend.
Current Epoch: 0%| | 0/1 [00:00<?, ? batch/s]Exception ignored in: <bound method tqdm.__del__ of Current Epoch: 0%| | 0/1 [07:59<?, ? batch/s]>
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 885, in __del__
self.close()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 1090, in close
self._decr_instances(self)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 454, in _decr_instances
cls.monitor.exit()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_monitor.py", line 52, in exit
self.join()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/threading.py", line 1053, in join
raise RuntimeError("cannot join current thread")
RuntimeError: cannot join current thread
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/search.py", line 296, in train
verbose=verbose).train_model(**trainer_args)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 103, in train_model
self._train()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 147, in _train
outputs = self.model(inputs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/graph.py", line 610, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py", line 66, in forward
exponential_average_factor, self.eps)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/functional.py", line 1254, in batch_norm
training, momentum, eps, torch.backends.cudnn.enabled
RuntimeError: $ Torch: not enough memory: you tried to allocate 0GB. Buy new RAM! at [/pytorch/aten/src/TH/THGeneral.cpp:204]
|
RuntimeError
|
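Since the exported model is a pickle, any preprocessing state needed at inference time (here, the resize parameters) has to travel inside the pickled object, which is what the added resize_params argument accomplishes. A generic sketch of the idea, not AutoKeras's actual file format:

import pickle

artifact = {"weights": [0.1, 0.2], "resize_params": (128, 128)}
with open("/tmp/portable_model.pkl", "wb") as f:
    pickle.dump(artifact, f)
with open("/tmp/portable_model.pkl", "rb") as f:
    restored = pickle.load(f)
assert restored["resize_params"] == (128, 128)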
def __init__(
self,
graph,
data_transformer,
y_encoder,
metric,
inverse_transform_y_method,
resize_params,
):
"""Initialize the instance.
Args:
graph: The graph form of the learned model
"""
super().__init__(graph)
self.data_transformer = data_transformer
self.y_encoder = y_encoder
self.metric = metric
self.inverse_transform_y_method = inverse_transform_y_method
self.resize_height = resize_params[0]
self.resize_width = resize_params[1]
|
def __init__(
self, graph, data_transformer, y_encoder, metric, inverse_transform_y_method
):
"""Initialize the instance.
Args:
graph: The graph form of the learned model
"""
super().__init__(graph)
self.data_transformer = data_transformer
self.y_encoder = y_encoder
self.metric = metric
self.inverse_transform_y_method = inverse_transform_y_method
|
https://github.com/keras-team/autokeras/issues/193
|
╒==============================================╕
| Training model 1 |
╘==============================================╛
Using TensorFlow backend.
Current Epoch: 0%| | 0/1 [00:00<?, ? batch/s]Exception ignored in: <bound method tqdm.__del__ of Current Epoch: 0%| | 0/1 [07:59<?, ? batch/s]>
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 885, in __del__
self.close()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 1090, in close
self._decr_instances(self)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_tqdm.py", line 454, in _decr_instances
cls.monitor.exit()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/tqdm/_monitor.py", line 52, in exit
self.join()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/threading.py", line 1053, in join
raise RuntimeError("cannot join current thread")
RuntimeError: cannot join current thread
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/search.py", line 296, in train
verbose=verbose).train_model(**trainer_args)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 103, in train_model
self._train()
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/model_trainer.py", line 147, in _train
outputs = self.model(inputs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/autokeras/graph.py", line 610, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py", line 66, in forward
exponential_average_factor, self.eps)
File "/root/anaconda3/envs/automl-formal/lib/python3.6/site-packages/torch/nn/functional.py", line 1254, in batch_norm
training, momentum, eps, torch.backends.cudnn.enabled
RuntimeError: $ Torch: not enough memory: you tried to allocate 0GB. Buy new RAM! at [/pytorch/aten/src/TH/THGeneral.cpp:204]
|
RuntimeError
|
def read_image(img_path):
img = imageio.imread(uri=img_path)
return img
|
def read_image(img_path):
img = ndimage.imread(fname=img_path)
return img
|
https://github.com/keras-team/autokeras/issues/226
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/Software/anaconda3/lib/python3.6/site-packages/autokeras/image_supervised.py in _validate(x_train, y_train)
24 try:
---> 25 x_train = x_train.astype('float64')
26 except ValueError:
ValueError: setting an array element with a sequence.
During handling of the above exception, another exception occurred:
[OUTPUT TRUNCATED]
|
ValueError
|
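scipy.ndimage.imread was deprecated in SciPy 1.0 and removed in 1.2, which is why the fix switches to imageio; both return a numpy array, so the call site barely changes. Usage, with an illustrative file path:

import imageio

img = imageio.imread("example.png")  # illustrative path
print(img.shape, img.dtype)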
def get_device():
"""If Cuda is available, use Cuda device, else use CPU device
When choosing from Cuda devices, this function will choose the one with max memory available
Returns: string device name
"""
# TODO: could use gputil in the future
device = "cpu"
if torch.cuda.is_available():
smi_out = os.popen("nvidia-smi -q -d Memory | grep -A4 GPU|grep Free").read()
# smi_out=
# Free : xxxxxx MiB
# Free : xxxxxx MiB
# ....
visable_list = [
int(x) for x in os.getenv("CUDA_VISIBLE_DEVICES", "").split(",")
]
memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]
for cuda_index, _ in enumerate(memory_available):
if cuda_index not in visable_list and visable_list:
memory_available[cuda_index] = 0
if memory_available:
if max(memory_available) != 0:
device = "cuda:" + str(memory_available.index(max(memory_available)))
return device
|
def get_device():
"""If Cuda is available, use Cuda device, else use CPU device
When choosing from Cuda devices, this function will choose the one with max memory available
Returns: string device name
"""
# TODO: could use gputil in the future
if torch.cuda.is_available():
smi_out = os.popen("nvidia-smi -q -d Memory | grep -A4 GPU|grep Free").read()
# smi_out=
# Free : xxxxxx MiB
# Free : xxxxxx MiB
# ....
memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]
if not memory_available:
device = "cpu"
else:
device = "cuda:" + str(memory_available.index(max(memory_available)))
else:
device = "cpu"
return device
|
https://github.com/keras-team/autokeras/issues/189
|
Traceback (most recent call last):
File "/home/hoanghiep/miniconda3/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/hoanghiep/miniconda3/lib/python3.6/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/home/hoanghiep/miniconda3/lib/python3.6/site-packages/autokeras/search.py", line 296, in train
verbose=verbose).train_model(**trainer_args)
File "/home/hoanghiep/miniconda3/lib/python3.6/site-packages/autokeras/model_trainer.py", line 64, in __init__
self.model.to(self.device)
File "/home/hoanghiep/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 379, in to
return self._apply(convert)
File "/home/hoanghiep/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 185, in _apply
module._apply(fn)
File "/home/hoanghiep/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 191, in _apply
param.data = fn(param.data)
File "/home/hoanghiep/miniconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 377, in convert
return t.to(device, dtype if t.is_floating_point() else None, non_blocking)
RuntimeError: CUDA error (10): invalid device ordinal
|
RuntimeError
|
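The "invalid device ordinal" arises from mixing two numbering schemes: nvidia-smi reports physical GPU indices, while a process started with CUDA_VISIBLE_DEVICES set sees only the visible devices, renumbered from 0. A sketch of the mismatch (comments only, since reproducing it needs a multi-GPU machine):

import os

# Suppose the machine has GPUs 0 and 1 but the process is restricted to GPU 1:
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# nvidia-smi would still report physical GPU 1's free memory at index 1,
# but inside this process only "cuda:0" exists, so torch.device("cuda:1")
# triggers "CUDA error: invalid device ordinal". The fix zeroes out the
# reported memory of any GPU not listed in CUDA_VISIBLE_DEVICES.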
def _dense_block_end_node(self, layer_id):
return self.layer_id_to_input_node_ids[layer_id][0]
|
def _dense_block_end_node(self, layer_id):
return self._block_end_node(layer_id, Constant.DENSE_BLOCK_DISTANCE)
|
https://github.com/keras-team/autokeras/issues/119
|
.....
Epoch 14: loss 0.9113655090332031, metric_value 0.9318181818181818
No loss decrease after 3 epochs
Father ID: 2
[('to_dense_deeper_model', 14), ('to_conv_deeper_model', 1, 5), ('to_add_skip_model', 20, 5), ('to_concat_skip_model', 1, 5), ('to_add_skip_model', 1, 20), ('to_wider_model', 5, 64), ('to_concat_skip_model', 20, 5), ('to_conv_deeper_model', 20, 5), ('to_dense_deeper_model', 18), ('to_concat_skip_model', 1, 20)]
Saving model.
Model ID: 7
Loss: tensor(0.9246, device='cuda:0')
Metric Value: 0.92739898989899
Training model 8
Traceback (most recent call last):
File "ak_my.py", line 15, in <module>
clf.fit(x_train, y_train, time_limit=12 * 60 * 60)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\image_supervised.py", line 238, in fit
run_searcher_once(train_data, test_data, self.path, int(time_remain))
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\image_supervised.py", line 41, in run_searcher_once
searcher.search(train_data, test_data, timeout)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\search.py", line 173, in search
self.metric, self.loss, self.verbose)]))
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\search.py", line 269, in train
verbose).train_model(**trainer_args)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\utils.py", line 101, in train_model
self._train()
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\utils.py", line 122, in _train
outputs = self.model(inputs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\graph.py", line 607, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\linear.py", line 55, in forward
return F.linear(input, self.weight, self.bias)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\functional.py", line 1024, in linear
return torch.addmm(bias, input, weight.t())
RuntimeError: size mismatch, m1: [128 x 2], m2: [64 x 64] at c:\programdata\miniconda3\conda-bld\pytorch_1533090623466\work\aten\src\thc\generic/THCTensorMathBlas.cu:249
|
RuntimeError
|
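The size mismatch in the traceback (m1: [128 x 2], m2: [64 x 64]) is the generic symptom of a graph mutation wiring a tensor of the wrong feature width into a Linear layer. Reproduced in isolation:

import torch
import torch.nn as nn

layer = nn.Linear(64, 64)  # weight matrix is the traceback's m2: [64 x 64]
x = torch.zeros(128, 2)    # a batch of 128 with only 2 features: m1: [128 x 2]
layer(x)                   # RuntimeError: shape mismatch (wording varies by PyTorch version)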
def extract_descriptor(self):
ret = NetworkDescriptor()
topological_node_list = self.topological_order
for u in topological_node_list:
for v, layer_id in self.adj_list[u]:
layer = self.layer_list[layer_id]
if is_layer(layer, "Conv") and layer.kernel_size not in [
1,
(1,),
(1, 1),
(1, 1, 1),
]:
ret.add_conv_width(layer_width(layer))
if is_layer(layer, "Dense"):
ret.add_dense_width(layer_width(layer))
# The position of each node, how many Conv and Dense layers before it.
pos = [0] * len(topological_node_list)
print(sorted(topological_node_list))
for v in topological_node_list:
layer_count = 0
for u, layer_id in self.reverse_adj_list[v]:
layer = self.layer_list[layer_id]
weighted = 0
if (
is_layer(layer, "Conv")
and layer.kernel_size not in [1, (1,), (1, 1), (1, 1, 1)]
) or is_layer(layer, "Dense"):
weighted = 1
layer_count = max(pos[u] + weighted, layer_count)
pos[v] = layer_count
for u in topological_node_list:
for v, layer_id in self.adj_list[u]:
if pos[u] == pos[v]:
continue
layer = self.layer_list[layer_id]
if is_layer(layer, "Concatenate"):
ret.add_skip_connection(
pos[u], pos[v], NetworkDescriptor.CONCAT_CONNECT
)
if is_layer(layer, "Add"):
ret.add_skip_connection(pos[u], pos[v], NetworkDescriptor.ADD_CONNECT)
return ret
|
def extract_descriptor(self):
ret = NetworkDescriptor()
topological_node_list = self.topological_order
for u in topological_node_list:
for v, layer_id in self.adj_list[u]:
layer = self.layer_list[layer_id]
if is_layer(layer, "Conv") and layer.kernel_size not in [
1,
(1,),
(1, 1),
(1, 1, 1),
]:
ret.add_conv_width(layer_width(layer))
if is_layer(layer, "Dense"):
ret.add_dense_width(layer_width(layer))
# The position of each node, how many Conv and Dense layers before it.
pos = [0] * len(topological_node_list)
for v in topological_node_list:
layer_count = 0
for u, layer_id in self.reverse_adj_list[v]:
layer = self.layer_list[layer_id]
weighted = 0
if (
is_layer(layer, "Conv")
and layer.kernel_size not in [1, (1,), (1, 1), (1, 1, 1)]
) or is_layer(layer, "Dense"):
weighted = 1
layer_count = max(pos[u] + weighted, layer_count)
pos[v] = layer_count
for u in topological_node_list:
for v, layer_id in self.adj_list[u]:
if pos[u] == pos[v]:
continue
layer = self.layer_list[layer_id]
if is_layer(layer, "Concatenate"):
ret.add_skip_connection(
pos[u], pos[v], NetworkDescriptor.CONCAT_CONNECT
)
if is_layer(layer, "Add"):
ret.add_skip_connection(pos[u], pos[v], NetworkDescriptor.ADD_CONNECT)
return ret
|
https://github.com/keras-team/autokeras/issues/119
|
.....
Epoch 14: loss 0.9113655090332031, metric_value 0.9318181818181818
No loss decrease after 3 epochs
Father ID: 2
[('to_dense_deeper_model', 14), ('to_conv_deeper_model', 1, 5), ('to_add_skip_model', 20, 5), ('to_concat_skip_model', 1, 5), ('to_add_skip_model', 1, 20), ('to_wider_model', 5, 64), ('to_concat_skip_model', 20, 5), ('to_conv_deeper_model', 20, 5), ('to_dense_deeper_model', 18), ('to_concat_skip_model', 1, 20)]
Saving model.
Model ID: 7
Loss: tensor(0.9246, device='cuda:0')
Metric Value: 0.92739898989899
Training model 8
Traceback (most recent call last):
File "ak_my.py", line 15, in <module>
clf.fit(x_train, y_train, time_limit=12 * 60 * 60)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\image_supervised.py", line 238, in fit
run_searcher_once(train_data, test_data, self.path, int(time_remain))
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\image_supervised.py", line 41, in run_searcher_once
searcher.search(train_data, test_data, timeout)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\search.py", line 173, in search
self.metric, self.loss, self.verbose)]))
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\search.py", line 269, in train
verbose).train_model(**trainer_args)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\utils.py", line 101, in train_model
self._train()
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\utils.py", line 122, in _train
outputs = self.model(inputs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\graph.py", line 607, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\linear.py", line 55, in forward
return F.linear(input, self.weight, self.bias)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\functional.py", line 1024, in linear
return torch.addmm(bias, input, weight.t())
RuntimeError: size mismatch, m1: [128 x 2], m2: [64 x 64] at c:\programdata\miniconda3\conda-bld\pytorch_1533090623466\work\aten\src\thc\generic/THCTensorMathBlas.cu:249
|
RuntimeError
|
def to_deeper_graph(graph):
weighted_layer_ids = graph.deep_layer_ids()
if len(weighted_layer_ids) >= Constant.MAX_MODEL_DEPTH:
return None
deeper_layer_ids = sample(weighted_layer_ids, 1)
# n_deeper_layer = randint(1, len(weighted_layer_ids))
# deeper_layer_ids = sample(weighted_layer_ids, n_deeper_layer)
for layer_id in deeper_layer_ids:
layer = graph.layer_list[layer_id]
if is_layer(layer, "Conv"):
graph.to_conv_deeper_model(layer_id, 3)
else:
graph.to_dense_deeper_model(layer_id)
return graph
|
def to_deeper_graph(graph):
weighted_layer_ids = graph.deep_layer_ids()
if len(weighted_layer_ids) >= Constant.MAX_MODEL_DEPTH:
return None
deeper_layer_ids = sample(weighted_layer_ids, 1)
# n_deeper_layer = randint(1, len(weighted_layer_ids))
# deeper_layer_ids = sample(weighted_layer_ids, n_deeper_layer)
for layer_id in deeper_layer_ids:
layer = graph.layer_list[layer_id]
if is_layer(layer, "Conv"):
graph.to_conv_deeper_model(layer_id, randint(1, 2) * 2 + 1)
else:
graph.to_dense_deeper_model(layer_id)
return graph
|
https://github.com/keras-team/autokeras/issues/119
|
.....
Epoch 14: loss 0.9113655090332031, metric_value 0.9318181818181818
No loss decrease after 3 epochs
Father ID: 2
[('to_dense_deeper_model', 14), ('to_conv_deeper_model', 1, 5), ('to_add_skip_model', 20, 5), ('to_concat_skip_model', 1, 5), ('to_add_skip_model', 1, 20), ('to_wider_model', 5, 64), ('to_concat_skip_model', 20, 5), ('to_conv_deeper_model', 20, 5), ('to_dense_deeper_model', 18), ('to_concat_skip_model', 1, 20)]
Saving model.
Model ID: 7
Loss: tensor(0.9246, device='cuda:0')
Metric Value: 0.92739898989899
Training model 8
Traceback (most recent call last):
File "ak_my.py", line 15, in <module>
clf.fit(x_train, y_train, time_limit=12 * 60 * 60)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\image_supervised.py", line 238, in fit
run_searcher_once(train_data, test_data, self.path, int(time_remain))
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\image_supervised.py", line 41, in run_searcher_once
searcher.search(train_data, test_data, timeout)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\search.py", line 173, in search
self.metric, self.loss, self.verbose)]))
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\search.py", line 269, in train
verbose).train_model(**trainer_args)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\utils.py", line 101, in train_model
self._train()
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\utils.py", line 122, in _train
outputs = self.model(inputs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\autokeras\graph.py", line 607, in forward
temp_tensor = torch_layer(edge_input_tensor)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\modules\linear.py", line 55, in forward
return F.linear(input, self.weight, self.bias)
File "C:\Users\kmbmjn\Anaconda3\lib\site-packages\torch\nn\functional.py", line 1024, in linear
return torch.addmm(bias, input, weight.t())
RuntimeError: size mismatch, m1: [128 x 2], m2: [64 x 64] at c:\programdata\miniconda3\conda-bld\pytorch_1533090623466\work\aten\src\thc\generic/THCTensorMathBlas.cu:249
|
RuntimeError
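The RuntimeError above is PyTorch's shape check in torch.addmm: a [128 x 2] activation reaches a Linear layer whose weight is [64 x 64]. A minimal sketch (plain PyTorch, not autokeras code; assumes torch is installed) reproduces the same failure in isolation:

import torch
import torch.nn as nn

layer = nn.Linear(64, 64)                  # expects inputs with last dimension 64
print(layer(torch.randn(128, 64)).shape)   # torch.Size([128, 64])
try:
    layer(torch.randn(128, 2))             # mimics the mis-sized activation above
except RuntimeError as error:
    print(error)                           # size/shape mismatch, as in the traceback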
|
def _raise_passphrase_exception(self):
    # Let the passphrase helper raise the stored callback problem first;
    # if there is none, fall through to the OpenSSL error queue.
    if self._passphrase_helper is not None:
        self._passphrase_helper.raise_if_problem(Error)
    _raise_current_error()
|
def _raise_passphrase_exception(self):
if self._passphrase_helper is None:
_raise_current_error()
exception = self._passphrase_helper.raise_if_problem(Error)
if exception is not None:
raise exception
|
https://github.com/pyca/pyopenssl/issues/119
|
/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/spread/jelly.py:93: DeprecationWarning: the sets module is deprecated
import sets as _sets
/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/endpoints.py:30: DeprecationWarning: twisted.internet.interfaces.IStreamClientEndpointStringParser was deprecated in Twisted 14.0.0: This interface has been superseded by IStreamClientEndpointStringParserWithReactor.
from twisted.internet.interfaces import (
Traceback (most recent call last):
File "/usr/local/bin/twistd", line 14, in <module>
run()
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/scripts/twistd.py", line 27, in run
app.run(runApp, ServerOptions)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/application/app.py", line 642, in run
runApp(config)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/scripts/twistd.py", line 23, in runApp
_SomeApplicationRunner(config).run()
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/application/app.py", line 376, in run
self.application = self.createOrGetApplication()
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/application/app.py", line 436, in createOrGetApplication
ser = plg.makeService(self.config.subOptions)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/web/tap.py", line 230, in makeService
strports.service(config['port'], site).setServiceParent(s)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/application/strports.py", line 77, in service
endpoints._serverFromStringLegacy(reactor, description, default),
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/endpoints.py", line 1407, in _serverFromStringLegacy
nameOrPlugin, args, kw = _parseServer(description, None, default)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/endpoints.py", line 1398, in _parseServer
return (endpointType.upper(),) + parser(factory, *args[1:], **kw)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/endpoints.py", line 1118, in _parseSSL
privateCertificate = ssl.PrivateCertificate.loadPEM(certPEM + keyPEM)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/_sslverify.py", line 619, in loadPEM
return Class.load(data, KeyPair.load(data, crypto.FILETYPE_PEM),
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/_sslverify.py", line 725, in load
return Class(crypto.load_privatekey(format, data))
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/OpenSSL/crypto.py", line 2010, in load_privatekey
_raise_current_error()
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/OpenSSL/_util.py", line 22, in exception_from_error_queue
raise exceptionType(errors)
OpenSSL.crypto.Error: []
|
OpenSSL.crypto.Error
|
def raise_if_problem(self, exceptionType=Error):
if self._problems:
# Flush the OpenSSL error queue
try:
_exception_from_error_queue(exceptionType)
except exceptionType:
pass
raise self._problems.pop(0)
|
def raise_if_problem(self, exceptionType=Error):
try:
_exception_from_error_queue(exceptionType)
except exceptionType as e:
from_queue = e
if self._problems:
raise self._problems[0]
return from_queue
|
https://github.com/pyca/pyopenssl/issues/119
|
/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/spread/jelly.py:93: DeprecationWarning: the sets module is deprecated
import sets as _sets
/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/endpoints.py:30: DeprecationWarning: twisted.internet.interfaces.IStreamClientEndpointStringParser was deprecated in Twisted 14.0.0: This interface has been superseded by IStreamClientEndpointStringParserWithReactor.
from twisted.internet.interfaces import (
Traceback (most recent call last):
File "/usr/local/bin/twistd", line 14, in <module>
run()
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/scripts/twistd.py", line 27, in run
app.run(runApp, ServerOptions)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/application/app.py", line 642, in run
runApp(config)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/scripts/twistd.py", line 23, in runApp
_SomeApplicationRunner(config).run()
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/application/app.py", line 376, in run
self.application = self.createOrGetApplication()
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/application/app.py", line 436, in createOrGetApplication
ser = plg.makeService(self.config.subOptions)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/web/tap.py", line 230, in makeService
strports.service(config['port'], site).setServiceParent(s)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/application/strports.py", line 77, in service
endpoints._serverFromStringLegacy(reactor, description, default),
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/endpoints.py", line 1407, in _serverFromStringLegacy
nameOrPlugin, args, kw = _parseServer(description, None, default)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/endpoints.py", line 1398, in _parseServer
return (endpointType.upper(),) + parser(factory, *args[1:], **kw)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/endpoints.py", line 1118, in _parseSSL
privateCertificate = ssl.PrivateCertificate.loadPEM(certPEM + keyPEM)
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/_sslverify.py", line 619, in loadPEM
return Class.load(data, KeyPair.load(data, crypto.FILETYPE_PEM),
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/twisted/internet/_sslverify.py", line 725, in load
return Class(crypto.load_privatekey(format, data))
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/OpenSSL/crypto.py", line 2010, in load_privatekey
_raise_current_error()
File "/usr/local/Cellar/python/2.7.6/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/OpenSSL/_util.py", line 22, in exception_from_error_queue
raise exceptionType(errors)
OpenSSL.crypto.Error: []
|
OpenSSL.crypto.Error
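The merged raise_if_problem drains the OpenSSL error queue before raising the stored Python-level problem, so the caller sees the original passphrase-callback failure rather than the empty "OpenSSL.crypto.Error: []" above. A minimal sketch of that pattern (the names here are illustrative stand-ins, not pyOpenSSL's API):

class Error(Exception):
    pass

def flush_error_queue():
    # Stand-in for _exception_from_error_queue: always raises from the queue.
    raise Error([])

class PassphraseHelper:
    def __init__(self):
        self._problems = []

    def raise_if_problem(self):
        if self._problems:
            try:
                flush_error_queue()        # discard stale queue entries
            except Error:
                pass
            raise self._problems.pop(0)    # surface the real failure

helper = PassphraseHelper()
helper._problems.append(ValueError("callback failed"))
try:
    helper.raise_if_problem()
except ValueError as problem:
    print(problem)                         # callback failed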
|
def stats(self):
"""
Returns
dict?: resource stats
"""
stats = self.get("stats")
if stats is None:
stats = {"hash": "", "bytes": 0}
if self.tabular:
stats.update({"fields": 0, "rows": 0})
stats = self.metadata_attach("stats", stats)
return stats
|
def stats(self):
"""
Returns
dict?: resource stats
"""
stats = {"hash": "", "bytes": 0, "fields": 0, "rows": 0}
return self.metadata_attach("stats", self.get("stats", stats))
|
https://github.com/frictionlessdata/frictionless-py/issues/641
|
Traceback (most recent call last):
File "C:\Users\user\programs\PyCharm\PyCharm Community Edition 2020.3.2\plugins\python-ce\helpers\pydev\_pydevd_bundle\pydevd_exec2.py", line 3, in Exec
exec(exp, global_vars, local_vars)
File "<input>", line 1, in <module>
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\describe\main.py", line 46, in describe
return describe(source, **options)
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\describe\resource.py", line 129, in describe_resource
with table as table:
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\table.py", line 217, in __enter__
self.open()
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\table.py", line 400, in open
self.__parser = system.create_parser(self.__resource)
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\system.py", line 161, in create_parser
raise FrictionlessException(errors.FormatError(note=note))
frictionless.exception.FrictionlessException: [format-error] The data source could not be successfully parsed: cannot create parser "pdf". Try installing "frictionless-pdf"
|
frictionless.exception.FrictionlessException
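The merged stats property attaches the tabular-only counters only when the resource is known to be tabular, so describing a non-tabular source (the PDF above) no longer depends on a parser being installable. A minimal sketch of that defaulting logic (a plain function, not the frictionless API):

def make_stats(tabular):
    stats = {"hash": "", "bytes": 0}
    if tabular:
        stats.update({"fields": 0, "rows": 0})
    return stats

print(make_stats(tabular=False))   # {'hash': '', 'bytes': 0}
print(make_stats(tabular=True))    # adds 'fields' and 'rows' as well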
|
def open(self):
"""Open the resource as "io.open" does
Raises:
FrictionlessException: any exception that occurs
"""
self.close()
# Infer
self.pop("stats", None)
self["name"] = self.name
self["profile"] = self.profile
self["scheme"] = self.scheme
self["format"] = self.format
self["hashing"] = self.hashing
self["encoding"] = self.encoding
if self.innerpath:
self["innerpath"] = self.innerpath
if self.compression:
self["compression"] = self.compression
if self.control:
self["control"] = self.control
if self.dialect:
self["dialect"] = self.dialect
self["stats"] = self.stats
# Validate
if self.metadata_errors:
error = self.metadata_errors[0]
raise FrictionlessException(error)
# Open
try:
# Table
if self.tabular:
self.__parser = system.create_parser(self)
self.__parser.open()
self.__read_detect_layout()
self.__read_detect_schema()
if not self.__nolookup:
self.__lookup = self.__read_detect_lookup()
self.__header = self.__read_header()
self.__row_stream = self.__read_row_stream()
return self
# File
else:
self.__loader = system.create_loader(self)
self.__loader.open()
return self
# Error
except Exception:
self.close()
raise
|
def open(self):
"""Open the resource as "io.open" does
Raises:
FrictionlessException: any exception that occurs
"""
self.close()
# Infer
self["name"] = self.name
self["profile"] = self.profile
self["scheme"] = self.scheme
self["format"] = self.format
self["hashing"] = self.hashing
self["encoding"] = self.encoding
if self.innerpath:
self["innerpath"] = self.innerpath
if self.compression:
self["compression"] = self.compression
if self.control:
self["control"] = self.control
if self.dialect:
self["dialect"] = self.dialect
self["stats"] = {"hash": "", "bytes": 0, "fields": 0, "rows": 0}
# Validate
if self.metadata_errors:
error = self.metadata_errors[0]
raise FrictionlessException(error)
# Open
try:
# Table
if self.tabular:
self.__parser = system.create_parser(self)
self.__parser.open()
self.__read_detect_layout()
self.__read_detect_schema()
if not self.__nolookup:
self.__lookup = self.__read_detect_lookup()
self.__header = self.__read_header()
self.__row_stream = self.__read_row_stream()
return self
# File
else:
self.__loader = system.create_loader(self)
self.__loader.open()
return self
# Error
except Exception:
self.close()
raise
|
https://github.com/frictionlessdata/frictionless-py/issues/641
|
Traceback (most recent call last):
File "C:\Users\user\programs\PyCharm\PyCharm Community Edition 2020.3.2\plugins\python-ce\helpers\pydev\_pydevd_bundle\pydevd_exec2.py", line 3, in Exec
exec(exp, global_vars, local_vars)
File "<input>", line 1, in <module>
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\describe\main.py", line 46, in describe
return describe(source, **options)
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\describe\resource.py", line 129, in describe_resource
with table as table:
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\table.py", line 217, in __enter__
self.open()
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\table.py", line 400, in open
self.__parser = system.create_parser(self.__resource)
File "C:\Users\user\programs\miniconda\envs\WPP2\lib\site-packages\frictionless\system.py", line 161, in create_parser
raise FrictionlessException(errors.FormatError(note=note))
frictionless.exception.FrictionlessException: [format-error] The data source could not be successfully parsed: cannot create parser "pdf". Try installing "frictionless-pdf"
|
frictionless.exception.FrictionlessException
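The merged open() also pops any stale "stats" entry before re-inferring, so a reopened resource recomputes hash/bytes instead of carrying old values forward. A minimal sketch of clearing a derived key before re-inference (an illustrative dict subclass, not the frictionless Resource):

class Resource(dict):
    def open(self):
        self.pop("stats", None)                   # drop stale derived values
        self["stats"] = {"hash": "", "bytes": 0}  # recompute from scratch
        return self

resource = Resource(stats={"hash": "old", "bytes": 99})
print(resource.open()["stats"])                   # {'hash': '', 'bytes': 0}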
|
def to_dict(self):
"""Convert metadata to a dict
Returns:
dict: metadata as a dict
"""
return helpers.deepnative(self)
|
def to_dict(self):
"""Convert metadata to a dict
Returns:
dict: metadata as a dict
"""
return self.copy()
|
https://github.com/frictionlessdata/frictionless-py/issues/453
|
In [28]: package.to_yaml('test.yml')
---------------------------------------------------------------------------
RepresenterError Traceback (most recent call last)
~/.local/lib/python3.8/site-packages/frictionless/metadata.py in to_yaml(self, target)
137 with tempfile.NamedTemporaryFile("wt", delete=False) as file:
--> 138 yaml.dump(self.to_dict(), file, Dumper=IndentDumper)
139 helpers.move_file(file.name, target)
/usr/lib/python3.8/site-packages/yaml/__init__.py in dump(data, stream, Dumper, **kwds)
289 """
--> 290 return dump_all([data], stream, Dumper=Dumper, **kwds)
291
/usr/lib/python3.8/site-packages/yaml/__init__.py in dump_all(documents, stream, Dumper, default_style, default_flow_style, canonical, indent, width, allow_unicode, line_break, encoding, explicit_start, explicit_end, version, tags, sort_keys)
277 for data in documents:
--> 278 dumper.represent(data)
279 dumper.close()
/usr/lib/python3.8/site-packages/yaml/representer.py in represent(self, data)
26 def represent(self, data):
---> 27 node = self.represent_data(data)
28 self.serialize(node)
/usr/lib/python3.8/site-packages/yaml/representer.py in represent_data(self, data)
47 if data_types[0] in self.yaml_representers:
---> 48 node = self.yaml_representers[data_types[0]](self, data)
49 else:
/usr/lib/python3.8/site-packages/yaml/representer.py in represent_dict(self, data)
206 def represent_dict(self, data):
--> 207 return self.represent_mapping('tag:yaml.org,2002:map', data)
208
/usr/lib/python3.8/site-packages/yaml/representer.py in represent_mapping(self, tag, mapping, flow_style)
117 node_key = self.represent_data(item_key)
--> 118 node_value = self.represent_data(item_value)
119 if not (isinstance(node_key, ScalarNode) and not node_key.style):
/usr/lib/python3.8/site-packages/yaml/representer.py in represent_data(self, data)
57 elif None in self.yaml_representers:
---> 58 node = self.yaml_representers[None](self, data)
59 else:
/usr/lib/python3.8/site-packages/yaml/representer.py in represent_undefined(self, data)
230 def represent_undefined(self, data):
--> 231 raise RepresenterError("cannot represent an object", data)
232
RepresenterError: ('cannot represent an object', [{'path': 'test/data/data.csv', 'control': {'newline': ''}, 'schema': {'fields': [{'name': 'atomic number', 'type': 'integer'}, {'name': 'symbol', 'type': 'string'}, {'name': 'name', 'type': 'string'}, {'name': 'atomic mass', 'type': 'number'}, {'name': 'metal or nonmetal?', 'type': 'string'}]}, 'profile': 'tabular-data-resource', 'name': 'data', 'scheme': 'file', 'format': 'csv', 'hashing': 'md5', 'encoding': 'utf-8', 'compression': 'no', 'compressionPath': '', 'dialect': {}, 'query': {}, 'stats': {'hash': 'ec86e3663d1e497ff100dc66002cb88b', 'bytes': 4252, 'fields': 5, 'rows': 118}}])
|
RepresenterError
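The RepresenterError above comes from PyYAML meeting dict subclasses it has no representer for; self.copy() returned a shallow copy still holding Metadata objects. The merged to_dict delegates to helpers.deepnative instead. A minimal sketch of such a recursive conversion (re-implemented here for illustration, not the frictionless helper itself):

def deepnative(value):
    # Recursively convert dict/list subclasses into plain builtins.
    if isinstance(value, dict):
        return {key: deepnative(item) for key, item in value.items()}
    if isinstance(value, list):
        return [deepnative(item) for item in value]
    return value

class Metadata(dict):
    pass

nested = Metadata(resources=[Metadata(name="data", stats=Metadata(rows=118))])
plain = deepnative(nested)
print(type(plain["resources"][0]))   # <class 'dict'>, safe for yaml.dump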
|
def create_storage(self, name, **options):
if name == "bigquery":
return BigqueryStorage(**options)
|
def create_storage(self, name, **options):
pass
|
https://github.com/frictionlessdata/frictionless-py/issues/422
|
In [1]: from frictionless import describe_package
In [2]: csv = 'a,b\n0,1'
In [3]: with open('test.csv', 'w') as f:
...: f.write(csv)
...:
In [4]: package = describe_package('test.csv')
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-4-4f4f7e6b6d26> in <module>
----> 1 package = describe_package('test.csv')
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/describe/package.py in describe_package(source, basepath, trusted, expand)
22 # Infer package
23 package = Package(basepath=basepath, trusted=trusted)
---> 24 package.infer(source)
25
26 # Expand package
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/package.py in infer(self, source, only_sample)
223 # General
224 for resource in self.resources:
--> 225 resource.infer(only_sample=only_sample)
226
227 # Import/Export
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in infer(self, source, only_sample)
372
373 # Tabular
--> 374 if self.tabular:
375 with self.to_table() as table:
376 patch["profile"] = "tabular-data-resource"
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in tabular(self)
209 bool: if resource is tabular
210 """
--> 211 table = self.to_table()
212 try:
213 table.open()
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in to_table(self, **options)
684 if "lookup" not in options:
685 options["lookup"] = self.read_lookup()
--> 686 return Table(**options)
687
688 def to_file(self, **options):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/table.py in __init__(self, source, scheme, format, hashing, encoding, compression, compression_path, control, dialect, query, headers, schema, sync_schema, patch_schema, infer_type, infer_names, infer_volume, infer_confidence, infer_missing_values, lookup)
191 control=control,
192 dialect=dialect,
--> 193 query=query,
194 )
195
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/file.py in __init__(self, source, scheme, format, hashing, encoding, compression, compression_path, control, dialect, query, newline, stats)
94
95 # Initialize file
---> 96 super().__init__()
97
98 def __enter__(self):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/metadata.py in __init__(self, descriptor)
40 for key, value in metadata.items():
41 dict.setdefault(self, key, value)
---> 42 self.__onchange__()
43
44 def __setattr__(self, name, value):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/metadata.py in __onchange__(self, onchange)
66 if reset and key in self.__dict__:
67 self.__dict__.pop(key)
---> 68 self.metadata_process()
69 if self.metadata_strict:
70 for error in self.metadata_errors:
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/file.py in metadata_process(self)
371 dialect = self.get("dialect")
372 if dialect is not None:
--> 373 dialect = system.create_dialect(self, descriptor=dialect)
374 dict.__setitem__(self, "dialect", dialect)
375
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in create_dialect(self, file, descriptor)
110 name = file.format
111 dialects = import_module("frictionless.dialects")
--> 112 for func in self.methods["create_dialect"].values():
113 dialect = func(file, descriptor=descriptor)
114 if dialect is not None:
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in methods(self)
229 for action in self.actions:
230 methods[action] = OrderedDict()
--> 231 for name, plugin in self.plugins.items():
232 if action in vars(type(plugin)):
233 func = getattr(plugin, action, None)
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in plugins(self)
246 module = import_module("frictionless.plugins")
247 for _, name, _ in pkgutil.iter_modules([os.path.dirname(module.__file__)]):
--> 248 module = import_module(f"frictionless.plugins.{name}")
249 modules[name] = module
250 plugins = OrderedDict()
/usr/lib/python3.6/importlib/__init__.py in import_module(name, package)
124 break
125 level += 1
--> 126 return _bootstrap._gcd_import(name[level:], package, level)
127
128
/usr/lib/python3.6/importlib/_bootstrap.py in _gcd_import(name, package, level)
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load(name, import_)
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
/usr/lib/python3.6/importlib/_bootstrap.py in _load_unlocked(spec)
/usr/lib/python3.6/importlib/_bootstrap_external.py in exec_module(self, module)
/usr/lib/python3.6/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/plugins/bigquery.py in <module>
4 import time
5 import unicodecsv
----> 6 from slugify import slugify
7 from ..resource import Resource
8 from ..storage import Storage
ModuleNotFoundError: No module named 'slugify'
|
ModuleNotFoundError
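The ModuleNotFoundError above is triggered by plugin discovery importing every plugin module eagerly, so one missing optional dependency (slugify) breaks unrelated operations. The cure is to defer such imports until the plugin is actually used, as the SPSS reader below does via helpers.import_from_plugin. A minimal sketch in that spirit (an assumption about the helper's behavior, not its exact implementation):

from importlib import import_module

def import_from_plugin(name, plugin):
    try:
        return import_module(name)
    except ImportError:
        raise ImportError(
            'Please install the "%s" plugin to use "%s"' % (plugin, name)
        )

# Deferred: only fails if and when BigQuery support is actually requested.
# slugify = import_from_plugin("slugify", plugin="bigquery")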
|
def __init__(self, service, project, dataset, prefix=""):
self.__service = service
self.__project = project
self.__dataset = dataset
self.__prefix = prefix
|
def __init__(self, service, project, dataset, prefix=""):
self.__service = service
self.__project = project
self.__dataset = dataset
self.__prefix = prefix
self.__names = None
self.__tables = {}
self.__fallbacks = {}
|
https://github.com/frictionlessdata/frictionless-py/issues/422
|
In [1]: from frictionless import describe_package
In [2]: csv = 'a,b\n0,1'
In [3]: with open('test.csv', 'w') as f:
...: f.write(csv)
...:
In [4]: package = describe_package('test.csv')
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-4-4f4f7e6b6d26> in <module>
----> 1 package = describe_package('test.csv')
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/describe/package.py in describe_package(source, basepath, trusted, expand)
22 # Infer package
23 package = Package(basepath=basepath, trusted=trusted)
---> 24 package.infer(source)
25
26 # Expand package
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/package.py in infer(self, source, only_sample)
223 # General
224 for resource in self.resources:
--> 225 resource.infer(only_sample=only_sample)
226
227 # Import/Export
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in infer(self, source, only_sample)
372
373 # Tabular
--> 374 if self.tabular:
375 with self.to_table() as table:
376 patch["profile"] = "tabular-data-resource"
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in tabular(self)
209 bool: if resource is tabular
210 """
--> 211 table = self.to_table()
212 try:
213 table.open()
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in to_table(self, **options)
684 if "lookup" not in options:
685 options["lookup"] = self.read_lookup()
--> 686 return Table(**options)
687
688 def to_file(self, **options):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/table.py in __init__(self, source, scheme, format, hashing, encoding, compression, compression_path, control, dialect, query, headers, schema, sync_schema, patch_schema, infer_type, infer_names, infer_volume, infer_confidence, infer_missing_values, lookup)
191 control=control,
192 dialect=dialect,
--> 193 query=query,
194 )
195
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/file.py in __init__(self, source, scheme, format, hashing, encoding, compression, compression_path, control, dialect, query, newline, stats)
94
95 # Initialize file
---> 96 super().__init__()
97
98 def __enter__(self):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/metadata.py in __init__(self, descriptor)
40 for key, value in metadata.items():
41 dict.setdefault(self, key, value)
---> 42 self.__onchange__()
43
44 def __setattr__(self, name, value):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/metadata.py in __onchange__(self, onchange)
66 if reset and key in self.__dict__:
67 self.__dict__.pop(key)
---> 68 self.metadata_process()
69 if self.metadata_strict:
70 for error in self.metadata_errors:
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/file.py in metadata_process(self)
371 dialect = self.get("dialect")
372 if dialect is not None:
--> 373 dialect = system.create_dialect(self, descriptor=dialect)
374 dict.__setitem__(self, "dialect", dialect)
375
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in create_dialect(self, file, descriptor)
110 name = file.format
111 dialects = import_module("frictionless.dialects")
--> 112 for func in self.methods["create_dialect"].values():
113 dialect = func(file, descriptor=descriptor)
114 if dialect is not None:
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in methods(self)
229 for action in self.actions:
230 methods[action] = OrderedDict()
--> 231 for name, plugin in self.plugins.items():
232 if action in vars(type(plugin)):
233 func = getattr(plugin, action, None)
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in plugins(self)
246 module = import_module("frictionless.plugins")
247 for _, name, _ in pkgutil.iter_modules([os.path.dirname(module.__file__)]):
--> 248 module = import_module(f"frictionless.plugins.{name}")
249 modules[name] = module
250 plugins = OrderedDict()
/usr/lib/python3.6/importlib/__init__.py in import_module(name, package)
124 break
125 level += 1
--> 126 return _bootstrap._gcd_import(name[level:], package, level)
127
128
/usr/lib/python3.6/importlib/_bootstrap.py in _gcd_import(name, package, level)
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load(name, import_)
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
/usr/lib/python3.6/importlib/_bootstrap.py in _load_unlocked(spec)
/usr/lib/python3.6/importlib/_bootstrap_external.py in exec_module(self, module)
/usr/lib/python3.6/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/plugins/bigquery.py in <module>
4 import time
5 import unicodecsv
----> 6 from slugify import slugify
7 from ..resource import Resource
8 from ..storage import Storage
ModuleNotFoundError: No module named 'slugify'
|
ModuleNotFoundError
|
def __read_data_stream(self, name, schema):
sav = helpers.import_from_plugin("savReaderWriter", plugin="spss")
path = self.__write_convert_name(name)
        # Yield the header row first, then one list of cells per data row
        yield schema.field_names
with sav.SavReader(path, ioUtf8=True, rawMode=False) as reader:
for item in reader:
cells = []
for index, field in enumerate(schema.fields):
value = item[index]
# Fix decimals that should be integers
if field.type == "integer" and value is not None:
value = int(float(value))
# We need to decode bytes to strings
if isinstance(value, bytes):
value = value.decode(reader.fileEncoding)
# Time values need a decimal, add one if missing.
if field.type == "time" and not re.search(r"\.\d*", value):
value = "{}.0".format(value)
cells.append(value)
yield cells
|
def __read_data_stream(self, name, schema):
sav = helpers.import_from_plugin("savReaderWriter", plugin="spss")
path = self.__write_convert_name(name)
with sav.SavReader(path, ioUtf8=True, rawMode=False) as reader:
for item in reader:
cells = []
for index, field in enumerate(schema.fields):
value = item[index]
# Fix decimals that should be integers
if field.type == "integer" and value is not None:
value = int(float(value))
# We need to decode bytes to strings
if isinstance(value, bytes):
value = value.decode(reader.fileEncoding)
# Time values need a decimal, add one if missing.
if field.type == "time" and not re.search(r"\.\d*", value):
value = "{}.0".format(value)
cells.append(value)
yield cells
|
https://github.com/frictionlessdata/frictionless-py/issues/422
|
In [1]: from frictionless import describe_package
In [2]: csv = 'a,b\n0,1'
In [3]: with open('test.csv', 'w') as f:
...: f.write(csv)
...:
In [4]: package = describe_package('test.csv')
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-4-4f4f7e6b6d26> in <module>
----> 1 package = describe_package('test.csv')
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/describe/package.py in describe_package(source, basepath, trusted, expand)
22 # Infer package
23 package = Package(basepath=basepath, trusted=trusted)
---> 24 package.infer(source)
25
26 # Expand package
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/package.py in infer(self, source, only_sample)
223 # General
224 for resource in self.resources:
--> 225 resource.infer(only_sample=only_sample)
226
227 # Import/Export
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in infer(self, source, only_sample)
372
373 # Tabular
--> 374 if self.tabular:
375 with self.to_table() as table:
376 patch["profile"] = "tabular-data-resource"
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in tabular(self)
209 bool: if resource is tabular
210 """
--> 211 table = self.to_table()
212 try:
213 table.open()
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/resource.py in to_table(self, **options)
684 if "lookup" not in options:
685 options["lookup"] = self.read_lookup()
--> 686 return Table(**options)
687
688 def to_file(self, **options):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/table.py in __init__(self, source, scheme, format, hashing, encoding, compression, compression_path, control, dialect, query, headers, schema, sync_schema, patch_schema, infer_type, infer_names, infer_volume, infer_confidence, infer_missing_values, lookup)
191 control=control,
192 dialect=dialect,
--> 193 query=query,
194 )
195
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/file.py in __init__(self, source, scheme, format, hashing, encoding, compression, compression_path, control, dialect, query, newline, stats)
94
95 # Initialize file
---> 96 super().__init__()
97
98 def __enter__(self):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/metadata.py in __init__(self, descriptor)
40 for key, value in metadata.items():
41 dict.setdefault(self, key, value)
---> 42 self.__onchange__()
43
44 def __setattr__(self, name, value):
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/metadata.py in __onchange__(self, onchange)
66 if reset and key in self.__dict__:
67 self.__dict__.pop(key)
---> 68 self.metadata_process()
69 if self.metadata_strict:
70 for error in self.metadata_errors:
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/file.py in metadata_process(self)
371 dialect = self.get("dialect")
372 if dialect is not None:
--> 373 dialect = system.create_dialect(self, descriptor=dialect)
374 dict.__setitem__(self, "dialect", dialect)
375
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in create_dialect(self, file, descriptor)
110 name = file.format
111 dialects = import_module("frictionless.dialects")
--> 112 for func in self.methods["create_dialect"].values():
113 dialect = func(file, descriptor=descriptor)
114 if dialect is not None:
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in methods(self)
229 for action in self.actions:
230 methods[action] = OrderedDict()
--> 231 for name, plugin in self.plugins.items():
232 if action in vars(type(plugin)):
233 func = getattr(plugin, action, None)
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/helpers.py in __get__(self, instance, owner)
430 val = cache.get(self.attrname, config.UNDEFINED)
431 if val is config.UNDEFINED:
--> 432 val = self.func(instance)
433 try:
434 cache[self.attrname] = val
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/system.py in plugins(self)
246 module = import_module("frictionless.plugins")
247 for _, name, _ in pkgutil.iter_modules([os.path.dirname(module.__file__)]):
--> 248 module = import_module(f"frictionless.plugins.{name}")
249 modules[name] = module
250 plugins = OrderedDict()
/usr/lib/python3.6/importlib/__init__.py in import_module(name, package)
124 break
125 level += 1
--> 126 return _bootstrap._gcd_import(name[level:], package, level)
127
128
/usr/lib/python3.6/importlib/_bootstrap.py in _gcd_import(name, package, level)
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load(name, import_)
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
/usr/lib/python3.6/importlib/_bootstrap.py in _load_unlocked(spec)
/usr/lib/python3.6/importlib/_bootstrap_external.py in exec_module(self, module)
/usr/lib/python3.6/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)
/tmp/bug-frictionless/env/lib/python3.6/site-packages/frictionless/plugins/bigquery.py in <module>
4 import time
5 import unicodecsv
----> 6 from slugify import slugify
7 from ..resource import Resource
8 from ..storage import Storage
ModuleNotFoundError: No module named 'slugify'
|
ModuleNotFoundError
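The merged __read_data_stream yields schema.field_names before the data rows, so consumers receive a complete tabular stream with its header. A minimal sketch of that generator shape (illustrative, not the SPSS storage itself):

def read_data_stream(field_names, rows):
    yield field_names            # header row comes first
    for row in rows:
        yield list(row)          # then one list of cells per row

stream = read_data_stream(["id", "name"], [(1, "a"), (2, "b")])
print(next(stream))   # ['id', 'name']
print(list(stream))   # [[1, 'a'], [2, 'b']]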
|
def prepare(self, stream, schema, extra):
# Prepare package
if "datapackage" not in extra or "resource-name" not in extra:
return False
descriptor = extra["datapackage"]
if descriptor.strip().startswith("{"):
descriptor = json.loads(descriptor)
self.__package = datapackage.Package(descriptor)
# Prepare schema
if not schema:
return False
if not schema.foreign_keys:
return False
self.__schema = schema
# Prepare foreign keys values
try:
self.__relations = _get_relations(
self.__package, self.__schema, current_resource_name=extra["resource-name"]
)
self.__foreign_keys_values = _get_foreign_keys_values(
self.__schema, self.__relations
)
        self.__relations_exception = None
    except _ReferenceTableError as exception:
        # Store the failure; check_row() reports it per row instead of
        # aborting the whole validation run.
        self.__relations_exception = exception
return True
|
def prepare(self, stream, schema, extra):
# Prepare package
if "datapackage" not in extra or "resource-name" not in extra:
return False
descriptor = extra["datapackage"]
if descriptor.strip().startswith("{"):
descriptor = json.loads(descriptor)
self.__package = Package(descriptor)
# Prepare schema
if not schema:
return False
if not schema.foreign_keys:
return False
self.__schema = schema
# Prepare foreign keys values
relations = _get_relations(
self.__package, self.__schema, current_resource_name=extra["resource-name"]
)
self.__foreign_keys_values = _get_foreign_keys_values(self.__schema, relations)
return True
|
https://github.com/frictionlessdata/frictionless-py/issues/347
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
report = validate("datapackage.json", checks=["structure", "schema", "foreign-key"], order_fields=True, infer_fields=False)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/validate.py", line 80, in validate
report = inspector.inspect(source, **options)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/inspector.py", line 82, in inspect
table_warnings, table_report = task.get()
File "/home/didiez/anaconda3/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
File "/home/didiez/anaconda3/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/inspector.py", line 200, in __inspect_table
success = prepare_func(stream, schema, extra)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/contrib/checks/foreign_key.py", line 48, in prepare
current_resource_name=extra['resource-name'])
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/contrib/checks/foreign_key.py", line 116, in _get_relations
relations[resource_name] = resource.read(keyed=True)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/datapackage/resource.py", line 377, in read
foreign_keys_values=foreign_keys_values, **options)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 353, in read
for count, row in enumerate(rows, start=1):
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 215, in iter
for row_number, headers, row in iterator:
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 509, in builtin_processor
row, row_number=row_number, exc_handler=exc_handler)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/schema.py", line 266, in cast_row
error_data=keyed_row)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/helpers.py", line 90, in default_exc_handler
raise exc
datapackage.exceptions.CastError: Row length 5 doesn't match fields count 6 for row "2"
|
datapackage.exceptions.CastError
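The merged prepare/check_row pair above turns the hard crash into reportable errors: a broken reference table is caught and stored in prepare(), and check_row() then emits one foreign-key error per row. A minimal sketch of that deferral pattern (class and names are illustrative, not the goodtables API):

class ReferenceTableError(Exception):
    pass

class ForeignKeyCheck:
    def prepare(self, load_relations):
        try:
            self._relations = load_relations()
            self._relations_exception = None
        except ReferenceTableError as exception:
            self._relations_exception = exception   # defer, do not crash
        return True

    def check_row(self, row_number):
        if self._relations_exception:
            return ["row %s: foreign key violation caused by invalid "
                    "reference table: %s" % (row_number, self._relations_exception)]
        return []

def broken_loader():
    raise ReferenceTableError("[people] row length mismatch")

check = ForeignKeyCheck()
check.prepare(broken_loader)
print(check.check_row(2))   # one error per row instead of an aborted run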
|
def check_row(self, cells):
row_number = cells[0]["row-number"]
errors = []
# We DON'T have relations to validate
if self.__relations_exception:
# Add a reference error
message = "Foreign key violation caused by invalid reference table: %s"
errors.append(
Error(
self.__code,
row_number=row_number,
message=message % self.__relations_exception,
)
)
# We have relations to validate
else:
# Prepare keyed_row
keyed_row = {}
for cell in cells:
if cell.get("field"):
keyed_row[cell.get("field").name] = cell.get("value")
# Resolve relations
for foreign_key in self.__schema.foreign_keys:
success = _resolve_relations(
deepcopy(keyed_row), self.__foreign_keys_values, foreign_key
)
if success is None:
message = 'Foreign key "{fields}" violation in row {row_number}'
message_substitutions = {"fields": foreign_key["fields"]}
# if not a composite foreign-key, add the cell causing the violation to improve the error details
# with the column-number
error_cell = None
if len(foreign_key["fields"]) == 1:
for cell in cells:
if cell["header"] == foreign_key["fields"][0]:
error_cell = cell
break
# Add an error
errors.append(
Error(
self.__code,
cell=error_cell,
row_number=row_number,
message=message,
message_substitutions=message_substitutions,
)
)
return errors
|
def check_row(self, cells):
row_number = cells[0]["row-number"]
# Prepare keyed_row
keyed_row = {}
for cell in cells:
if cell.get("field"):
keyed_row[cell.get("field").name] = cell.get("value")
# Resolve relations
errors = []
for foreign_key in self.__schema.foreign_keys:
success = _resolve_relations(
deepcopy(keyed_row), self.__foreign_keys_values, foreign_key
)
if success is None:
message = 'Foreign key "{fields}" violation in row {row_number}'
message_substitutions = {"fields": foreign_key["fields"]}
# if not a composite foreign-key, add the cell causing the violation to improve the error details
# with the column-number
error_cell = None
if len(foreign_key["fields"]) == 1:
for cell in cells:
if cell["header"] == foreign_key["fields"][0]:
error_cell = cell
break
errors.append(
Error(
self.__code,
cell=error_cell,
row_number=row_number,
message=message,
message_substitutions=message_substitutions,
)
)
return errors
|
https://github.com/frictionlessdata/frictionless-py/issues/347
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
report = validate("datapackage.json", checks=["structure", "schema", "foreign-key"], order_fields=True, infer_fields=False)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/validate.py", line 80, in validate
report = inspector.inspect(source, **options)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/inspector.py", line 82, in inspect
table_warnings, table_report = task.get()
File "/home/didiez/anaconda3/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
File "/home/didiez/anaconda3/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/inspector.py", line 200, in __inspect_table
success = prepare_func(stream, schema, extra)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/contrib/checks/foreign_key.py", line 48, in prepare
current_resource_name=extra['resource-name'])
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/contrib/checks/foreign_key.py", line 116, in _get_relations
relations[resource_name] = resource.read(keyed=True)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/datapackage/resource.py", line 377, in read
foreign_keys_values=foreign_keys_values, **options)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 353, in read
for count, row in enumerate(rows, start=1):
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 215, in iter
for row_number, headers, row in iterator:
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 509, in builtin_processor
row, row_number=row_number, exc_handler=exc_handler)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/schema.py", line 266, in cast_row
error_data=keyed_row)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/helpers.py", line 90, in default_exc_handler
raise exc
datapackage.exceptions.CastError: Row length 5 doesn't match fields count 6 for row "2"
|
datapackage.exceptions.CastError
|
def _get_relations(package, schema, current_resource_name=None):
# It's based on the following code:
# https://github.com/frictionlessdata/datapackage-py/blob/master/datapackage/resource.py#L393
# Prepare relations
relations = {}
for fk in schema.foreign_keys:
resource_name = fk["reference"].get("resource")
package_name = fk["reference"].get("package")
resource = None
# Self-referenced resource
if not resource_name:
for item in package.resources:
if item.name == current_resource_name:
resource = item
# Internal resource
elif not package_name:
resource = package.get_resource(resource_name)
# External resource (experimental)
# For now, we rely on uniqueness of resource names and support relative paths
else:
descriptor = package_name
if not descriptor.startswith("http"):
descriptor = "/".join([package.base_path, package_name])
package = datapackage.Package(descriptor)
resource = package.get_resource(resource_name)
# Add to relations (can be None)
relations[resource_name] = resource
if resource and resource.tabular:
try:
relations[resource_name] = resource.read(keyed=True)
# TODO: datapackage should raise `IntegrityError` here
except datapackage.exceptions.CastError as exception:
raise _ReferenceTableError("[%s] %s" % (resource_name, str(exception)))
return relations
|
def _get_relations(package, schema, current_resource_name=None):
# It's based on the following code:
# https://github.com/frictionlessdata/datapackage-py/blob/master/datapackage/resource.py#L393
# Prepare relations
relations = {}
for fk in schema.foreign_keys:
resource_name = fk["reference"].get("resource")
package_name = fk["reference"].get("package")
resource = None
# Self-referenced resource
if not resource_name:
for item in package.resources:
if item.name == current_resource_name:
resource = item
# Internal resource
elif not package_name:
resource = package.get_resource(resource_name)
# External resource (experimental)
# For now, we rely on uniqueness of resource names and support relative paths
else:
descriptor = package_name
if not descriptor.startswith("http"):
descriptor = "/".join([package.base_path, package_name])
package = Package(descriptor)
resource = package.get_resource(resource_name)
# Add to relations (can be None)
relations[resource_name] = resource
if resource and resource.tabular:
relations[resource_name] = resource.read(keyed=True)
return relations
|
https://github.com/frictionlessdata/frictionless-py/issues/347
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
report = validate("datapackage.json", checks=["structure", "schema", "foreign-key"], order_fields=True, infer_fields=False)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/validate.py", line 80, in validate
report = inspector.inspect(source, **options)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/inspector.py", line 82, in inspect
table_warnings, table_report = task.get()
File "/home/didiez/anaconda3/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
File "/home/didiez/anaconda3/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/inspector.py", line 200, in __inspect_table
success = prepare_func(stream, schema, extra)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/contrib/checks/foreign_key.py", line 48, in prepare
current_resource_name=extra['resource-name'])
File "/home/didiez/anaconda3/lib/python3.7/site-packages/goodtables/contrib/checks/foreign_key.py", line 116, in _get_relations
relations[resource_name] = resource.read(keyed=True)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/datapackage/resource.py", line 377, in read
foreign_keys_values=foreign_keys_values, **options)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 353, in read
for count, row in enumerate(rows, start=1):
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 215, in iter
for row_number, headers, row in iterator:
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/table.py", line 509, in builtin_processor
row, row_number=row_number, exc_handler=exc_handler)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/schema.py", line 266, in cast_row
error_data=keyed_row)
File "/home/didiez/anaconda3/lib/python3.7/site-packages/tableschema/helpers.py", line 90, in default_exc_handler
raise exc
datapackage.exceptions.CastError: Row length 5 doesn't match fields count 6 for row "2"
|
datapackage.exceptions.CastError
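The merged _get_relations above additionally wraps the third-party CastError into the domain-level _ReferenceTableError, tagging the failing resource name. A minimal sketch of that exception translation (CastError and ReferenceTableError are stand-ins for the datapackage/goodtables types):

class CastError(Exception):
    pass

class ReferenceTableError(Exception):
    pass

def read_relation(resource_name, reader):
    try:
        return reader()
    except CastError as exception:
        # Re-raise as a domain error that names the failing resource.
        raise ReferenceTableError("[%s] %s" % (resource_name, exception))

def bad_reader():
    raise CastError('Row length 5 doesn\'t match fields count 6 for row "2"')

try:
    read_relation("people", bad_reader)
except ReferenceTableError as error:
    print(error)   # [people] Row length 5 doesn't match fields count 6 ...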
|
def missing_header(cells, sample):
errors = []
for cell in copy(cells):
# Skip if header in cell
if cell.get("header") is not None:
continue
# Add error
field_name = cell["field"].name if cell["field"] else ""
message_substitutions = {"field_name": '"{}"'.format(field_name)}
message = "There is a missing header in column {column_number}"
# It's a temporary solution for
# https://github.com/frictionlessdata/goodtables-py/issues/338
if not cell.get("column-number"):
message = "There is a missing header in column {field_name}"
error = Error(
"missing-header",
cell,
message=message,
message_substitutions=message_substitutions,
)
errors.append(error)
# Remove cell
cells.remove(cell)
return errors
|
def missing_header(cells, sample):
errors = []
for cell in copy(cells):
# Skip if header in cell
if cell.get("header") is not None:
continue
# Add error
message_substitutions = {
"field_name": '"{}"'.format(cell["field"].name),
}
error = Error(
"missing-header", cell, message_substitutions=message_substitutions
)
errors.append(error)
# Remove cell
cells.remove(cell)
return errors
|
https://github.com/frictionlessdata/frictionless-py/issues/337
|
Traceback (most recent call last):
File "bug.py", line 67, in <module>
report = validate(DESCRIPTOR, skip_checks=['extra-header'], order_fields=True, infer_fields=False)
File "/home/didiez/bug_goodtables/env/lib/python3.7/site-packages/goodtables/validate.py", line 80, in validate
report = inspector.inspect(source, **options)
File "/home/didiez/bug_goodtables/env/lib/python3.7/site-packages/goodtables/inspector.py", line 82, in inspect
table_warnings, table_report = task.get()
File "/home/didiez/anaconda3/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
File "/home/didiez/anaconda3/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/home/didiez/bug_goodtables/env/lib/python3.7/site-packages/goodtables/inspector.py", line 222, in __inspect_table
errors += (check_func(header_cells, sample) or [])
File "/home/didiez/bug_goodtables/env/lib/python3.7/site-packages/goodtables/checks/missing_header.py", line 26, in missing_header
'field_name': '"{}"'.format(cell['field'].name),
AttributeError: 'NoneType' object has no attribute 'name'
|
AttributeError
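The AttributeError above comes from reading cell['field'].name when no schema field was matched to the cell; the merged missing_header reads the name defensively. A minimal sketch of that guard (Field is an illustrative stand-in):

class Field:
    def __init__(self, name):
        self.name = name

def field_name_of(cell):
    # Tolerate cells without a matched schema field.
    return cell["field"].name if cell["field"] else ""

print(repr(field_name_of({"field": Field("id")})))   # 'id'
print(repr(field_name_of({"field": None})))          # '' instead of a crash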
|
def __inspect_table(self, table):
# Start timer
start = datetime.datetime.now()
# Prepare vars
errors = []
headers = None
row_number = 0
fatal_error = False
checks = copy(self.__checks)
source = table["source"]
stream = table["stream"]
schema = table["schema"]
extra = table["extra"]
# Prepare table
try:
stream.open()
sample = stream.sample
headers = stream.headers
if self.__filter_checks(checks, type="schema"):
if schema is None and self.__infer_schema:
schema = Schema(infer(headers, sample))
if schema is None:
checks = self.__filter_checks(checks, type="schema", inverse=True)
except Exception as exception:
fatal_error = True
error = self.__compose_error_from_exception(exception)
errors.append(error)
# Prepare columns
if not fatal_error:
columns = []
fields = [None] * len(headers)
if schema is not None:
fields = schema.fields
iterator = zip_longest(headers, fields, fillvalue=_FILLVALUE)
for number, (header, field) in enumerate(iterator, start=1):
column = {"number": number}
if header is not _FILLVALUE:
column["header"] = header
if field is not _FILLVALUE:
column["field"] = field
columns.append(column)
# Head checks
if not fatal_error:
head_checks = self.__filter_checks(checks, context="head")
for check in head_checks:
if not columns:
break
check["func"](errors, columns, sample)
for error in errors:
error["row"] = None
# Body checks
if not fatal_error:
states = {}
colmap = {column["number"]: column for column in columns}
body_checks = self.__filter_checks(checks, context="body")
with stream:
extended_rows = stream.iter(extended=True)
while True:
try:
row_number, headers, row = next(extended_rows)
except StopIteration:
break
except Exception as exception:
fatal_error = True
error = self.__compose_error_from_exception(exception)
errors.append(error)
break
columns = []
iterator = zip_longest(headers, row, fillvalue=_FILLVALUE)
for number, (header, value) in enumerate(iterator, start=1):
colref = colmap.get(number, {})
column = {"number": number}
if header is not _FILLVALUE:
column["header"] = colref.get("header", header)
if "field" in colref:
column["field"] = colref["field"]
if value is not _FILLVALUE:
column["value"] = value
columns.append(column)
for check in body_checks:
if not columns:
break
state = states.setdefault(check["code"], {})
check["func"](errors, columns, row_number, state)
for error in reversed(errors):
if "row" in error:
break
error["row"] = row
if row_number >= self.__row_limit:
break
if len(errors) >= self.__error_limit:
break
# Stop timer
stop = datetime.datetime.now()
# Compose report
errors = errors[: self.__error_limit]
report = copy(extra)
report.update(
{
"time": round((stop - start).total_seconds(), 3),
"valid": not bool(errors),
"error-count": len(errors),
"row-count": row_number,
"headers": headers,
"source": source,
"errors": errors,
}
)
return report
|
def __inspect_table(self, table):
# Start timer
start = datetime.datetime.now()
# Prepare vars
errors = []
headers = None
row_number = 0
fatal_error = False
checks = copy(self.__checks)
source = table["source"]
stream = table["stream"]
schema = table["schema"]
extra = table["extra"]
# Prepare table
try:
stream.open()
sample = stream.sample
headers = stream.headers
if self.__filter_checks(checks, type="schema"):
if schema is None and self.__infer_schema:
schema = Schema(infer(headers, sample))
if schema is None:
checks = self.__filter_checks(checks, type="schema", inverse=True)
except Exception as exception:
fatal_error = True
message = str(exception)
if isinstance(exception, tabulator.exceptions.SourceError):
code = "source-error"
elif isinstance(exception, tabulator.exceptions.SchemeError):
code = "scheme-error"
elif isinstance(exception, tabulator.exceptions.FormatError):
code = "format-error"
elif isinstance(exception, tabulator.exceptions.EncodingError):
code = "encoding-error"
elif isinstance(exception, tabulator.exceptions.IOError):
code = "io-error"
elif isinstance(exception, tabulator.exceptions.HTTPError):
code = "http-error"
else:
raise
errors.append(
{
"row": None,
"code": code,
"message": message,
"row-number": None,
"column-number": None,
}
)
# Prepare columns
if not fatal_error:
columns = []
fields = [None] * len(headers)
if schema is not None:
fields = schema.fields
iterator = zip_longest(headers, fields, fillvalue=_FILLVALUE)
for number, (header, field) in enumerate(iterator, start=1):
column = {"number": number}
if header is not _FILLVALUE:
column["header"] = header
if field is not _FILLVALUE:
column["field"] = field
columns.append(column)
# Head checks
if not fatal_error:
head_checks = self.__filter_checks(checks, context="head")
for check in head_checks:
if not columns:
break
check["func"](errors, columns, sample)
for error in errors:
error["row"] = None
# Body checks
if not fatal_error:
states = {}
colmap = {column["number"]: column for column in columns}
body_checks = self.__filter_checks(checks, context="body")
with stream:
for row_number, headers, row in stream.iter(extended=True):
columns = []
iterator = zip_longest(headers, row, fillvalue=_FILLVALUE)
for number, (header, value) in enumerate(iterator, start=1):
colref = colmap.get(number, {})
column = {"number": number}
if header is not _FILLVALUE:
column["header"] = colref.get("header", header)
if "field" in colref:
column["field"] = colref["field"]
if value is not _FILLVALUE:
column["value"] = value
columns.append(column)
for check in body_checks:
if not columns:
break
state = states.setdefault(check["code"], {})
check["func"](errors, columns, row_number, state)
for error in reversed(errors):
if "row" in error:
break
error["row"] = row
if row_number >= self.__row_limit:
break
if len(errors) >= self.__error_limit:
break
# Stop timer
stop = datetime.datetime.now()
# Compose report
errors = errors[: self.__error_limit]
report = copy(extra)
report.update(
{
"time": round((stop - start).total_seconds(), 3),
"valid": not bool(errors),
"error-count": len(errors),
"row-count": row_number,
"headers": headers,
"source": source,
"errors": errors,
}
)
return report
|
https://github.com/frictionlessdata/frictionless-py/issues/189
|
Traceback (most recent call last):
File "/home/adria/dev/pyenvs/gt/lib/python3.5/site-packages/celery/app/trace.py", line 368, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/adria/dev/pyenvs/gt/lib/python3.5/site-packages/celery/app/trace.py", line 623, in __protected_call__
return self.run(*args, **kwargs)
File "/home/adria/dev/pyenvs/gt/src/goodtables.io/goodtablesio/tasks/validate.py", line 41, in validate
report = inspector.inspect(validation_conf['source'], preset='nested')
File "/home/adria/dev/pyenvs/gt/lib/python3.5/site-packages/goodtables/inspector.py", line 106, in inspect
report = task.get()
File "/usr/lib/python3.5/multiprocessing/pool.py", line 608, in get
raise self._value
File "/usr/lib/python3.5/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/adria/dev/pyenvs/gt/lib/python3.5/site-packages/goodtables/inspector.py", line 145, in __inspect_table
stream.open()
File "/home/adria/dev/pyenvs/gt/lib/python3.5/site-packages/tabulator/stream.py", line 206, in open
self.__extract_sample()
File "/home/adria/dev/pyenvs/gt/lib/python3.5/site-packages/tabulator/stream.py", line 356, in __extract_sample
number, headers, row = next(self.__parser.extended_rows)
File "/home/adria/dev/pyenvs/gt/lib/python3.5/site-packages/tabulator/parsers/csv.py", line 89, in __iter_extended_rows
sample, dialect = self.__prepare_dialect(self.__chars)
File "/home/adria/dev/pyenvs/gt/lib/python3.5/site-packages/tabulator/parsers/csv.py", line 100, in __prepare_dialect
sample.append(next(stream))
File "/home/adria/dev/pyenvs/gt/lib/python3.5/codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xcd in position 313: invalid continuation byte
[2017-05-10 16:28:30,081: ERROR/PoolWorker-1] Task goodtablesio.tasks.validate[0e8b8e8b-42dd-4d74-9510-132ccd100839] raised unexpected: UnicodeDecodeError('utf-8', b'CVE_ENT,CVE_MUN,CVE_LOC,NOM_LOC\r"01","001","0001","AGUASCALIENTES"\n"01","001","0094","GRANJA ADELITA"\n"01","001","0096","AGUA AZUL"\n"01","001","0100","RANCHO ALEGRE"\n"01","001","0102","LOS ARBOLITOS [RANCHO]"\n"01","001","0104","ARDILLAS DE ABAJO (LAS ARDILLAS)"\n"01","001","0106","ARELLANO"\n"01","001","0112","BAJ\xcdO LOS V\xc1ZQUEZ"\n"01","001","0113","BAJ\xcdO DE MONTORO"\n"01","001","0114","RESIDENCIAL SAN NICOL\xc1S [BA\xd1OS LA CANTERA]"\n"01","001","0120","BUENAVISTA DE PE\xd1UELAS"\n"01","001","0121","CABECITA 3 MAR\xcdAS (RANCHO NUEVO)"\n"01","001","0125","CA\xd1ADA GRANDE DE COTORINA"\n"01","001","0126","CA\xd1ADA HONDA [ESTACI\xd3N]"\n"01","001","0127","LOS CA\xd1OS"\n"01","001","0128","EL CARI\xd1\xc1N"\n"01","001","0129","EL CARMEN [GRANJA]"\n"01","001","0135","EL CEDAZO (CEDAZO DE SAN ANTONIO)"\n"01","001","0138","CENTRO DE ARRIBA (EL TARAY)"\n"01","001","0139","CIENEGUILLA (LA LUMBRERA)"\n"01","001","0141","COBOS"\n"01","001","0144","EL COLORADO (EL SOYATAL)"\n"01","001","0146","EL CONEJAL"\n"01","001","0157","COTORINA DE ABAJO"\n"01","001","0162","COYOTES"\n"01","001","0166","LA HUERTA (LA CRUZ)"\n"01","001","0170","CUAUHT\xc9MOC (LAS PALOMAS)"\n"01","001","0171","LOS CUERVOS (LOS OJOS DE AGUA)"\n"01","001","0172","SAN JOS\xc9 [GRANJA]"\n"01","001","0176","LA CHIRIPA"\n"01","001","0182","DOLORES"\n"01","001","0183","LOS DOLORES"\n"01","001","0190","EL DURAZNILLO"\n"01","001","0191","LOS DUR\xd3N"\n"01","001","0197","LA ESCONDIDA"\n"01","001","0201","DESDE EL CORAZ\xd3N DEL FRUTO"\n"01","001","0207","VALLE REDONDO"\n"01","001","0209","LA FORTUNA"\n"01","001","0212","LOMAS DEL GACHUP\xcdN"\n"01","001","0213","EL CARMEN (GALLINAS G\xdcERAS) [RANCHO]"\n"01","001","0216","LA GLORIA"\n"01","001","0226","HACIENDA NUEVA"\n"01","001","0227","LA HACIENDITA (LA ESPERANZA)"\n"01","001","0228","LA HERRADA"\n"01","001","0230","DON ABRAHAM [RANCHO]"\n"01","001","0231","PUERTA DE LOS HOYOS"\n"01","001","0236","LAS JABONERAS"\n"01","001","0237","JALTOMATE"\n"01","001","0239","GENERAL JOS\xc9 MAR\xcdA MORELOS Y PAV\xd3N (CA\xd1ADA HONDA)"\n"01","001","0253","LOS LIRIOS"\n"01","001","0256","LA LOMA DE LOS NEGRITOS"\n"01","001","0265","EL MALACATE"\n"01","001","0270","LA MASCOTA"\n"01","001","0272","MATAMOROS"\n"01","001","0279","SAN ISIDRO"\n"01","001","0280","SAN JOS\xc9"\n"01","001","0283","EL MOLINO"\n"01","001","0285","MONTORO"\n"01","001","0291","LOS NEGRITOS"\n"01","001","0292","EL NI\xc1GARA"\n"01","001","0293","NORIAS DE OJOCALIENTE"\n"01","001","0296","EL OCOTE"\n"01","001","0297","COMUNIDAD EL ROC\xcdO"\n"01","001","0309","LAS PALOMAS"\n"01","001","0315","PE\xd1UELAS (EL CIENEGAL)"\n"01","001","0321","PIEDRAS CHINAS"\n"01","001","0329","PRESA DE GUADALUPE"\n"01","001","0336","SOLEDAD DE ARRIBA"\n"01","001","0340","LA PUERTA (GRANJAS CARI\xd1\xc1N)"\n"01","001","0345","EL REFUGIO DE PE\xd1UELAS"\n"01","001","0347","EL REFUGIO I"\n"01","001","0353","EL RODEO', 313, 314, 'invalid continuation byte')
|
UnicodeDecodeError
|
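The fix above swaps the bare for loop over stream.iter(extended=True) for explicit next() calls inside try/except, so a mid-stream failure such as the UnicodeDecodeError in this traceback is composed into a report error instead of killing the worker. A minimal sketch of that pattern (names here are illustrative, not the goodtables internals):

def iter_rows_safely(extended_rows, compose_error):
    rows, errors = [], []
    while True:
        try:
            rows.append(next(extended_rows))
        except StopIteration:
            break
        except Exception as exception:  # e.g. UnicodeDecodeError from the parser
            errors.append(compose_error(exception))
            break
    return rows, errors

def bad_stream():
    yield (1, ["a"], ["ok"])
    raise UnicodeDecodeError("utf-8", b"\xcd", 0, 1, "invalid continuation byte")

rows, errors = iter_rows_safely(
    bad_stream(), lambda e: {"code": "source-error", "message": str(e)}
)
print(rows)    # [(1, ['a'], ['ok'])]
print(errors)  # one composed error instead of a crashed task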
def parse(code):
class_names, code = pre_parse(code)
if "\x00" in code:
raise ParserException("No null bytes (\\x00) allowed in the source code.")
o = ast.parse(code) # python ast
decorate_ast(o, code, class_names) # decorated python ast
o = resolve_negative_literals(o)
return o.body
|
def parse(code):
class_names, code = pre_parse(code)
o = ast.parse(code) # python ast
decorate_ast(o, code, class_names) # decorated python ast
o = resolve_negative_literals(o)
return o.body
|
https://github.com/vyperlang/vyper/issues/1184
|
Traceback (most recent call last):
File "/Users/mate/github/vyper/vyper-venv/bin/vyper", line 4, in <module>
__import__('pkg_resources').run_script('vyper==0.1.0b6', 'vyper')
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 664, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1451, in run_script
exec(script_code, namespace, namespace)
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 85, in <module>
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 108, in compile_codes
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 51, in exc_handler
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 105, in compile_codes
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 87, in <lambda>
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 8, in __compile
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/parser.py", line 63, in parse
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/ast.py", line 35, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
ValueError: source code string cannot contain null bytes
|
ValueError
|
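A minimal sketch of the guard the fix adds, stripped of vyper's pre_parse/decorate_ast/resolve_negative_literals steps (the exception class below is a stand-in):

import ast

class ParserException(Exception):  # stand-in for vyper's exception type
    pass

def parse(code):
    # Reject NUL bytes before compile(); otherwise ast.parse raises a bare
    # ValueError: "source code string cannot contain null bytes".
    if "\x00" in code:
        raise ParserException("No null bytes (\\x00) allowed in the source code.")
    return ast.parse(code)

try:
    parse("x = 1\x00")
except ParserException as e:
    print(e)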
def add_globals_and_events(self, item):
item_attributes = {"public": False}
# Make sure we have a valid variable name.
if not isinstance(item.target, ast.Name):
raise StructureException("Invalid global variable name", item.target)
# Handle constants.
if self.get_call_func_name(item) == "constant":
self._constants.add_constant(item, global_ctx=self)
return
# Handle events.
if not (self.get_call_func_name(item) == "event"):
item_name, item_attributes = self.get_item_name_and_attributes(
item, item_attributes
)
if not all([attr in valid_global_keywords for attr in item_attributes.keys()]):
raise StructureException(
"Invalid global keyword used: %s" % item_attributes, item
)
if item.value is not None:
raise StructureException("May not assign value whilst defining type", item)
elif self.get_call_func_name(item) == "event":
if self._globals or len(self._defs):
raise EventDeclarationException(
"Events must all come before global declarations and function definitions",
item,
)
self._events.append(item)
elif not isinstance(item.target, ast.Name):
raise StructureException(
"Can only assign type to variable in top-level statement", item
)
# Is this a custom unit definition.
elif item.target.id == "units":
if not self._custom_units:
if not isinstance(item.annotation, ast.Dict):
raise VariableDeclarationException(
"Define custom units using units: { }.", item.target
)
for key, value in zip(item.annotation.keys, item.annotation.values):
if not isinstance(value, ast.Str):
raise VariableDeclarationException(
"Custom unit description must be a valid string", value
)
if not isinstance(key, ast.Name):
raise VariableDeclarationException(
"Custom unit name must be a valid string", key
)
check_valid_varname(
key.id,
self._custom_units,
self._structs,
self._constants,
key,
"Custom unit invalid.",
)
self._custom_units.add(key.id)
self._custom_units_descriptions[key.id] = value.s
else:
raise VariableDeclarationException(
"Custom units can only be defined once", item.target
)
# Check if variable name is valid.
# Don't move this check higher, as unit parsing has to happen first.
elif not self.is_valid_varname(item.target.id, item):
pass
elif len(self._defs):
raise StructureException(
"Global variables must all come before function definitions", item
)
# If the type declaration is of the form public(<type here>), then proceed with
# the underlying type but also add getters
elif self.get_call_func_name(item) == "address":
if item.annotation.args[0].id not in premade_contracts:
raise VariableDeclarationException(
"Unsupported premade contract declaration", item.annotation.args[0]
)
premade_contract = premade_contracts[item.annotation.args[0].id]
self._contracts[item.target.id] = self.make_contract(premade_contract.body)
self._globals[item.target.id] = VariableRecord(
item.target.id, len(self._globals), BaseType("address"), True
)
elif item_name in self._contracts:
self._globals[item.target.id] = ContractRecord(
item.target.id, len(self._globals), ContractType(item_name), True
)
if item_attributes["public"]:
typ = ContractType(item_name)
for getter in self.mk_getter(item.target.id, typ):
self._getters.append(self.parse_line("\n" * (item.lineno - 1) + getter))
self._getters[-1].pos = getpos(item)
elif self.get_call_func_name(item) == "public":
if (
isinstance(item.annotation.args[0], ast.Name)
and item_name in self._contracts
):
typ = ContractType(item_name)
else:
typ = parse_type(
item.annotation.args[0],
"storage",
custom_units=self._custom_units,
custom_structs=self._structs,
constants=self._constants,
)
self._globals[item.target.id] = VariableRecord(
item.target.id, len(self._globals), typ, True
)
# Adding getters here
for getter in self.mk_getter(item.target.id, typ):
self._getters.append(self.parse_line("\n" * (item.lineno - 1) + getter))
self._getters[-1].pos = getpos(item)
elif isinstance(item.annotation, (ast.Name, ast.Call, ast.Subscript)):
self._globals[item.target.id] = VariableRecord(
item.target.id,
len(self._globals),
parse_type(
item.annotation,
"storage",
custom_units=self._custom_units,
custom_structs=self._structs,
constants=self._constants,
),
True,
)
else:
raise InvalidTypeException("Invalid global type specified", item)
|
def add_globals_and_events(self, item):
item_attributes = {"public": False}
# Handle constants.
if self.get_call_func_name(item) == "constant":
self._constants.add_constant(item, global_ctx=self)
return
# Handle events.
if not (self.get_call_func_name(item) == "event"):
item_name, item_attributes = self.get_item_name_and_attributes(
item, item_attributes
)
if not all([attr in valid_global_keywords for attr in item_attributes.keys()]):
raise StructureException(
"Invalid global keyword used: %s" % item_attributes, item
)
if item.value is not None:
raise StructureException("May not assign value whilst defining type", item)
elif self.get_call_func_name(item) == "event":
if self._globals or len(self._defs):
raise EventDeclarationException(
"Events must all come before global declarations and function definitions",
item,
)
self._events.append(item)
elif not isinstance(item.target, ast.Name):
raise StructureException(
"Can only assign type to variable in top-level statement", item
)
# Is this a custom unit definition.
elif item.target.id == "units":
if not self._custom_units:
if not isinstance(item.annotation, ast.Dict):
raise VariableDeclarationException(
"Define custom units using units: { }.", item.target
)
for key, value in zip(item.annotation.keys, item.annotation.values):
if not isinstance(value, ast.Str):
raise VariableDeclarationException(
"Custom unit description must be a valid string", value
)
if not isinstance(key, ast.Name):
raise VariableDeclarationException(
"Custom unit name must be a valid string", key
)
check_valid_varname(
key.id,
self._custom_units,
self._structs,
self._constants,
key,
"Custom unit invalid.",
)
self._custom_units.add(key.id)
self._custom_units_descriptions[key.id] = value.s
else:
raise VariableDeclarationException(
"Custom units can only be defined once", item.target
)
# Check if variable name is valid.
# Don't move this check higher, as unit parsing has to happen first.
elif not self.is_valid_varname(item.target.id, item):
pass
elif len(self._defs):
raise StructureException(
"Global variables must all come before function definitions", item
)
# If the type declaration is of the form public(<type here>), then proceed with
# the underlying type but also add getters
elif self.get_call_func_name(item) == "address":
if item.annotation.args[0].id not in premade_contracts:
raise VariableDeclarationException(
"Unsupported premade contract declaration", item.annotation.args[0]
)
premade_contract = premade_contracts[item.annotation.args[0].id]
self._contracts[item.target.id] = self.make_contract(premade_contract.body)
self._globals[item.target.id] = VariableRecord(
item.target.id, len(self._globals), BaseType("address"), True
)
elif item_name in self._contracts:
self._globals[item.target.id] = ContractRecord(
item.target.id, len(self._globals), ContractType(item_name), True
)
if item_attributes["public"]:
typ = ContractType(item_name)
for getter in self.mk_getter(item.target.id, typ):
self._getters.append(self.parse_line("\n" * (item.lineno - 1) + getter))
self._getters[-1].pos = getpos(item)
elif self.get_call_func_name(item) == "public":
if (
isinstance(item.annotation.args[0], ast.Name)
and item_name in self._contracts
):
typ = ContractType(item_name)
else:
typ = parse_type(
item.annotation.args[0],
"storage",
custom_units=self._custom_units,
custom_structs=self._structs,
constants=self._constants,
)
self._globals[item.target.id] = VariableRecord(
item.target.id, len(self._globals), typ, True
)
# Adding getters here
for getter in self.mk_getter(item.target.id, typ):
self._getters.append(self.parse_line("\n" * (item.lineno - 1) + getter))
self._getters[-1].pos = getpos(item)
elif isinstance(item.annotation, (ast.Name, ast.Call, ast.Subscript)):
self._globals[item.target.id] = VariableRecord(
item.target.id,
len(self._globals),
parse_type(
item.annotation,
"storage",
custom_units=self._custom_units,
custom_structs=self._structs,
constants=self._constants,
),
True,
)
else:
raise InvalidTypeException("Invalid global type specified", item)
|
https://github.com/vyperlang/vyper/issues/1185
|
Error compiling: 4.txt
Traceback (most recent call last):
File "/Users/mate/ethermat/security/vyper-fuzz/venv/bin/vyper", line 4, in <module>
__import__('pkg_resources').run_script('vyper==0.1.0b6', 'vyper')
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/setuptools-39.1.0-py3.6.egg/pkg_resources/__init__.py", line 658, in run_script
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/setuptools-39.1.0-py3.6.egg/pkg_resources/__init__.py", line 1445, in run_script
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 85, in <module>
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 108, in compile_codes
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 51, in exc_handler
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 105, in compile_codes
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 87, in <lambda>
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 8, in __compile
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/parser.py", line 234, in parse_tree_to_lll
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/parser.py", line 234, in <listcomp>
AttributeError: 'Attribute' object has no attribute 'id'
|
AttributeError
|
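The new early check rejects assignment targets that are not plain names before any .id access. A minimal repro of the failure mode and the guard (toy exception class):

import ast

class StructureException(Exception):  # stand-in for vyper's exception type
    pass

def global_target_name(item):
    # `foo.bar: int128` parses with an ast.Attribute target, which has no
    # `.id`; validate the node type first instead of crashing later.
    if not isinstance(item.target, ast.Name):
        raise StructureException("Invalid global variable name")
    return item.target.id

item = ast.parse("foo.bar: int128").body[0]  # AnnAssign with Attribute target
try:
    global_target_name(item)
except StructureException as e:
    print(e)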
def parse_type(
item, location, sigs=None, custom_units=None, custom_structs=None, constants=None
):
# Base and custom types, e.g. num
if isinstance(item, ast.Name):
if item.id in base_types:
return BaseType(item.id)
elif item.id in special_types:
return special_types[item.id]
elif (custom_structs is not None) and (item.id in custom_structs):
return make_struct_type(
item.id,
location,
custom_structs[item.id],
custom_units,
custom_structs,
constants,
)
else:
raise InvalidTypeException("Invalid base type: " + item.id, item)
# Units, e.g. num (1/sec) or contracts
elif isinstance(item, ast.Call):
# Mapping type.
if item.func.id == "map":
if location == "memory":
raise InvalidTypeException(
"No mappings allowed for in-memory types, only fixed-size arrays",
item,
)
if len(item.args) != 2:
raise InvalidTypeException(
"Mapping requires 2 valid positional arguments.", item
)
keytype = parse_type(
item.args[0],
None,
custom_units=custom_units,
custom_structs=custom_structs,
constants=constants,
)
if not isinstance(keytype, (BaseType, ByteArrayType)):
raise InvalidTypeException(
"Mapping keys must be base or bytes types", item
)
return MappingType(
keytype,
parse_type(
item.args[1],
location,
custom_units=custom_units,
custom_structs=custom_structs,
constants=constants,
),
)
# Contract_types
if item.func.id == "address":
if sigs and item.args[0].id in sigs:
return ContractType(item.args[0].id)
# Struct types
if (custom_structs is not None) and (item.func.id in custom_structs):
return make_struct_type(
item.id, location, custom_structs[item.id], custom_units, custom_structs
)
if not isinstance(item.func, ast.Name):
raise InvalidTypeException("Malformed unit type:", item)
base_type = item.func.id
if base_type not in ("int128", "uint256", "decimal"):
raise InvalidTypeException(
"You must use int128, uint256, decimal, address, contract, \
for variable declarations and indexed for logging topics ",
item,
)
if len(item.args) == 0:
raise InvalidTypeException("Malformed unit type", item)
if isinstance(item.args[-1], ast.Name) and item.args[-1].id == "positional":
positional = True
argz = item.args[:-1]
else:
positional = False
argz = item.args
if len(argz) != 1:
raise InvalidTypeException("Malformed unit type", item)
unit = parse_unit(argz[0], custom_units=custom_units)
return BaseType(base_type, unit, positional)
# Subscripts
elif isinstance(item, ast.Subscript):
if "value" not in vars(item.slice):
raise InvalidTypeException(
"Array / ByteArray access must access a single element, not a slice",
item,
)
# Fixed size lists or bytearrays, e.g. num[100]
is_constant_val = constants.ast_is_constant(item.slice.value)
if isinstance(item.slice.value, ast.Num) or is_constant_val:
n_val = (
constants.get_constant(item.slice.value.id, context=None).value
if is_constant_val
else item.slice.value.n
)
if not isinstance(n_val, int) or n_val <= 0:
raise InvalidTypeException(
"Arrays / ByteArrays must have a positive integral number of elements",
item.slice.value,
)
# ByteArray
if getattr(item.value, "id", None) == "bytes":
return ByteArrayType(n_val)
# List
else:
return ListType(
parse_type(
item.value,
location,
custom_units=custom_units,
custom_structs=custom_structs,
constants=constants,
),
n_val,
)
# Mappings, e.g. num[address]
else:
warnings.warn(
"Mapping definitions using subscript have deprecated (see VIP564). "
"Use map(type1, type2) instead.",
DeprecationWarning,
)
raise InvalidTypeException("Unknown list type.", item)
# Dicts, used to represent mappings, e.g. {uint: uint}. Key must be a base type
elif isinstance(item, ast.Dict):
warnings.warn(
"Anonymous structs have been removed in favor of named structs, see VIP300",
DeprecationWarning,
)
raise InvalidTypeException("Invalid type: %r" % ast.dump(item), item)
elif isinstance(item, ast.Tuple):
members = [
parse_type(
x,
location,
custom_units=custom_units,
custom_structs=custom_structs,
constants=constants,
)
for x in item.elts
]
return TupleType(members)
else:
raise InvalidTypeException("Invalid type: %r" % ast.dump(item), item)
|
def parse_type(
item, location, sigs=None, custom_units=None, custom_structs=None, constants=None
):
# Base and custom types, e.g. num
if isinstance(item, ast.Name):
if item.id in base_types:
return BaseType(item.id)
elif item.id in special_types:
return special_types[item.id]
elif (custom_structs is not None) and (item.id in custom_structs):
return make_struct_type(
item.id,
location,
custom_structs[item.id],
custom_units,
custom_structs,
constants,
)
else:
raise InvalidTypeException("Invalid base type: " + item.id, item)
# Units, e.g. num (1/sec) or contracts
elif isinstance(item, ast.Call):
# Mapping type.
if item.func.id == "map":
if location == "memory":
raise InvalidTypeException(
"No mappings allowed for in-memory types, only fixed-size arrays",
item,
)
keytype = parse_type(
item.args[0],
None,
custom_units=custom_units,
custom_structs=custom_structs,
constants=constants,
)
if not isinstance(keytype, (BaseType, ByteArrayType)):
raise InvalidTypeException(
"Mapping keys must be base or bytes types", item
)
return MappingType(
keytype,
parse_type(
item.args[1],
location,
custom_units=custom_units,
custom_structs=custom_structs,
constants=constants,
),
)
# Contract_types
if item.func.id == "address":
if sigs and item.args[0].id in sigs:
return ContractType(item.args[0].id)
# Struct types
if (custom_structs is not None) and (item.func.id in custom_structs):
return make_struct_type(
item.id, location, custom_structs[item.id], custom_units, custom_structs
)
if not isinstance(item.func, ast.Name):
raise InvalidTypeException("Malformed unit type:", item)
base_type = item.func.id
if base_type not in ("int128", "uint256", "decimal"):
raise InvalidTypeException(
"You must use int128, uint256, decimal, address, contract, \
for variable declarations and indexed for logging topics ",
item,
)
if len(item.args) == 0:
raise InvalidTypeException("Malformed unit type", item)
if isinstance(item.args[-1], ast.Name) and item.args[-1].id == "positional":
positional = True
argz = item.args[:-1]
else:
positional = False
argz = item.args
if len(argz) != 1:
raise InvalidTypeException("Malformed unit type", item)
unit = parse_unit(argz[0], custom_units=custom_units)
return BaseType(base_type, unit, positional)
# Subscripts
elif isinstance(item, ast.Subscript):
if "value" not in vars(item.slice):
raise InvalidTypeException(
"Array / ByteArray access must access a single element, not a slice",
item,
)
# Fixed size lists or bytearrays, e.g. num[100]
is_constant_val = constants.ast_is_constant(item.slice.value)
if isinstance(item.slice.value, ast.Num) or is_constant_val:
n_val = (
constants.get_constant(item.slice.value.id, context=None).value
if is_constant_val
else item.slice.value.n
)
if not isinstance(n_val, int) or n_val <= 0:
raise InvalidTypeException(
"Arrays / ByteArrays must have a positive integral number of elements",
item.slice.value,
)
# ByteArray
if getattr(item.value, "id", None) == "bytes":
return ByteArrayType(n_val)
# List
else:
return ListType(
parse_type(
item.value,
location,
custom_units=custom_units,
custom_structs=custom_structs,
constants=constants,
),
n_val,
)
# Mappings, e.g. num[address]
else:
warnings.warn(
"Mapping definitions using subscript have deprecated (see VIP564). "
"Use map(type1, type2) instead.",
DeprecationWarning,
)
raise InvalidTypeException("Unknown list type.", item)
# Dicts, used to represent mappings, e.g. {uint: uint}. Key must be a base type
elif isinstance(item, ast.Dict):
warnings.warn(
"Anonymous structs have been removed in favor of named structs, see VIP300",
DeprecationWarning,
)
raise InvalidTypeException("Invalid type: %r" % ast.dump(item), item)
elif isinstance(item, ast.Tuple):
members = [
parse_type(
x,
location,
custom_units=custom_units,
custom_structs=custom_structs,
constants=constants,
)
for x in item.elts
]
return TupleType(members)
else:
raise InvalidTypeException("Invalid type: %r" % ast.dump(item), item)
|
https://github.com/vyperlang/vyper/issues/1186
|
Error compiling: 10.txt
Traceback (most recent call last):
File "/Users/mate/ethermat/security/vyper-fuzz/venv/bin/vyper", line 4, in <module>
__import__('pkg_resources').run_script('vyper==0.1.0b6', 'vyper')
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/setuptools-39.1.0-py3.6.egg/pkg_resources/__init__.py", line 658, in run_script
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/setuptools-39.1.0-py3.6.egg/pkg_resources/__init__.py", line 1445, in run_script
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 85, in <module>
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 108, in compile_codes
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 51, in exc_handler
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 105, in compile_codes
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 87, in <lambda>
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 8, in __compile
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/parser.py", line 229, in parse_tree_to_lll
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/global_context.py", line 75, in get_global_context
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/global_context.py", line 307, in add_globals_and_events
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/types/types.py", line 321, in parse_type
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/types/types.py", line 321, in parse_type
IndexError: list index out of range
|
IndexError
|
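The fix validates map(...) arity before indexing item.args; a declaration like map(int128) used to fall through into parse_type and die with IndexError. A minimal sketch (toy exception class):

import ast

class InvalidTypeException(Exception):  # stand-in for vyper's exception type
    pass

def map_key_and_value(call):
    # Check the argument count before touching call.args[0] / call.args[1].
    if len(call.args) != 2:
        raise InvalidTypeException("Mapping requires 2 valid positional arguments.")
    return call.args[0], call.args[1]

call = ast.parse("map(int128)").body[0].value  # only one argument
try:
    map_key_and_value(call)
except InvalidTypeException as e:
    print(e)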
def get_item_name_and_attributes(self, item, attributes):
if isinstance(item, ast.Name):
return item.id, attributes
elif isinstance(item, ast.AnnAssign):
return self.get_item_name_and_attributes(item.annotation, attributes)
elif isinstance(item, ast.Subscript):
return self.get_item_name_and_attributes(item.value, attributes)
elif (
isinstance(item, ast.Call)
and isinstance(item.func, ast.Name)
and item.func.id == "map"
):
if len(item.args) != 2:
raise StructureException(
"Map type expects two type arguments map(type1, type2)", item.func
)
return self.get_item_name_and_attributes(item.args, attributes)
# elif ist
elif isinstance(item, ast.Call) and isinstance(item.func, ast.Name):
attributes[item.func.id] = True
# Raise for multiple args
if len(item.args) != 1:
raise StructureException("%s expects one arg (the type)" % item.func.id)
return self.get_item_name_and_attributes(item.args[0], attributes)
return None, attributes
|
def get_item_name_and_attributes(self, item, attributes):
if isinstance(item, ast.Name):
return item.id, attributes
elif isinstance(item, ast.AnnAssign):
return self.get_item_name_and_attributes(item.annotation, attributes)
elif isinstance(item, ast.Subscript):
return self.get_item_name_and_attributes(item.value, attributes)
elif isinstance(item, ast.Call) and item.func.id == "map":
if len(item.args) != 2:
raise StructureException(
"Map type expects two type arguments map(type1, type2)", item.func
)
return self.get_item_name_and_attributes(item.args, attributes)
# elif ist
elif isinstance(item, ast.Call):
attributes[item.func.id] = True
# Raise for multiple args
if len(item.args) != 1:
raise StructureException("%s expects one arg (the type)" % item.func.id)
return self.get_item_name_and_attributes(item.args[0], attributes)
return None, attributes
|
https://github.com/vyperlang/vyper/issues/1188
|
Error compiling: 5.txt
Traceback (most recent call last):
File "/Users/mate/ethermat/security/vyper-fuzz/venv/bin/vyper", line 4, in <module>
__import__('pkg_resources').run_script('vyper==0.1.0b6', 'vyper')
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/setuptools-39.1.0-py3.6.egg/pkg_resources/__init__.py", line 658, in run_script
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/setuptools-39.1.0-py3.6.egg/pkg_resources/__init__.py", line 1445, in run_script
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 85, in <module>
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 108, in compile_codes
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 51, in exc_handler
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 105, in compile_codes
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 87, in <lambda>
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 8, in __compile
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/parser.py", line 229, in parse_tree_to_lll
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/global_context.py", line 75, in get_global_context
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/global_context.py", line 234, in add_globals_and_events
AttributeError: 'Attribute' object has no attribute 'id'
|
AttributeError
|
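The fix guards every item.func.id access with isinstance(item.func, ast.Name), since calls like a.b(...) carry an ast.Attribute as func. A minimal sketch:

import ast

def call_func_name(item):
    # Only plain-name calls such as `public(...)` have `func.id`;
    # attribute calls like `a.b(...)` previously raised AttributeError.
    if isinstance(item, ast.Call) and isinstance(item.func, ast.Name):
        return item.func.id
    return None

print(call_func_name(ast.parse("public(int128)").body[0].value))  # 'public'
print(call_func_name(ast.parse("a.b(int128)").body[0].value))     # None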
def add_globals_and_events(self, item):
item_attributes = {"public": False}
# Handle constants.
if self.get_call_func_name(item) == "constant":
self._constants.add_constant(item, global_ctx=self)
return
# Handle events.
if not (self.get_call_func_name(item) == "event"):
item_name, item_attributes = self.get_item_name_and_attributes(
item, item_attributes
)
if not all([attr in valid_global_keywords for attr in item_attributes.keys()]):
raise StructureException(
"Invalid global keyword used: %s" % item_attributes, item
)
if item.value is not None:
raise StructureException("May not assign value whilst defining type", item)
elif self.get_call_func_name(item) == "event":
if self._globals or len(self._defs):
raise EventDeclarationException(
"Events must all come before global declarations and function definitions",
item,
)
self._events.append(item)
elif not isinstance(item.target, ast.Name):
raise StructureException(
"Can only assign type to variable in top-level statement", item
)
# Is this a custom unit definition.
elif item.target.id == "units":
if not self._custom_units:
if not isinstance(item.annotation, ast.Dict):
raise VariableDeclarationException(
"Define custom units using units: { }.", item.target
)
for key, value in zip(item.annotation.keys, item.annotation.values):
if not isinstance(value, ast.Str):
raise VariableDeclarationException(
"Custom unit description must be a valid string", value
)
if not isinstance(key, ast.Name):
raise VariableDeclarationException(
"Custom unit name must be a valid string", key
)
check_valid_varname(
key.id,
self._custom_units,
self._structs,
self._constants,
key,
"Custom unit invalid.",
)
self._custom_units.add(key.id)
self._custom_units_descriptions[key.id] = value.s
else:
raise VariableDeclarationException(
"Custom units can only be defined once", item.target
)
# Check if variable name is valid.
# Don't move this check higher, as unit parsing has to happen first.
elif not self.is_valid_varname(item.target.id, item):
pass
elif len(self._defs):
raise StructureException(
"Global variables must all come before function definitions", item
)
# If the type declaration is of the form public(<type here>), then proceed with
# the underlying type but also add getters
elif self.get_call_func_name(item) == "address":
if item.annotation.args[0].id not in premade_contracts:
raise VariableDeclarationException(
"Unsupported premade contract declaration", item.annotation.args[0]
)
premade_contract = premade_contracts[item.annotation.args[0].id]
self._contracts[item.target.id] = self.make_contract(premade_contract.body)
self._globals[item.target.id] = VariableRecord(
item.target.id, len(self._globals), BaseType("address"), True
)
elif item_name in self._contracts:
self._globals[item.target.id] = ContractRecord(
item.target.id, len(self._globals), ContractType(item_name), True
)
if item_attributes["public"]:
typ = ContractType(item_name)
for getter in self.mk_getter(item.target.id, typ):
self._getters.append(self.parse_line("\n" * (item.lineno - 1) + getter))
self._getters[-1].pos = getpos(item)
elif self.get_call_func_name(item) == "public":
if (
isinstance(item.annotation.args[0], ast.Name)
and item_name in self._contracts
):
typ = ContractType(item_name)
else:
typ = parse_type(
item.annotation.args[0],
"storage",
custom_units=self._custom_units,
custom_structs=self._structs,
constants=self._constants,
)
self._globals[item.target.id] = VariableRecord(
item.target.id, len(self._globals), typ, True
)
# Adding getters here
for getter in self.mk_getter(item.target.id, typ):
self._getters.append(self.parse_line("\n" * (item.lineno - 1) + getter))
self._getters[-1].pos = getpos(item)
elif isinstance(item.annotation, (ast.Name, ast.Call, ast.Subscript)):
self._globals[item.target.id] = VariableRecord(
item.target.id,
len(self._globals),
parse_type(
item.annotation,
"storage",
custom_units=self._custom_units,
custom_structs=self._structs,
constants=self._constants,
),
True,
)
else:
raise InvalidTypeException("Invalid global type specified", item)
|
def add_globals_and_events(self, item):
item_attributes = {"public": False}
# Handle constants.
if isinstance(item.annotation, ast.Call) and item.annotation.func.id == "constant":
self._constants.add_constant(item, global_ctx=self)
return
# Handle events.
if not (
isinstance(item.annotation, ast.Call) and item.annotation.func.id == "event"
):
item_name, item_attributes = self.get_item_name_and_attributes(
item, item_attributes
)
if not all([attr in valid_global_keywords for attr in item_attributes.keys()]):
raise StructureException(
"Invalid global keyword used: %s" % item_attributes, item
)
if item.value is not None:
raise StructureException("May not assign value whilst defining type", item)
elif isinstance(item.annotation, ast.Call) and item.annotation.func.id == "event":
if self._globals or len(self._defs):
raise EventDeclarationException(
"Events must all come before global declarations and function definitions",
item,
)
self._events.append(item)
elif not isinstance(item.target, ast.Name):
raise StructureException(
"Can only assign type to variable in top-level statement", item
)
# Is this a custom unit definition.
elif item.target.id == "units":
if not self._custom_units:
if not isinstance(item.annotation, ast.Dict):
raise VariableDeclarationException(
"Define custom units using units: { }.", item.target
)
for key, value in zip(item.annotation.keys, item.annotation.values):
if not isinstance(value, ast.Str):
raise VariableDeclarationException(
"Custom unit description must be a valid string", value
)
if not isinstance(key, ast.Name):
raise VariableDeclarationException(
"Custom unit name must be a valid string", key
)
check_valid_varname(
key.id,
self._custom_units,
self._structs,
self._constants,
key,
"Custom unit invalid.",
)
self._custom_units.add(key.id)
self._custom_units_descriptions[key.id] = value.s
else:
raise VariableDeclarationException(
"Custom units can only be defined once", item.target
)
# Check if variable name is valid.
# Don't move this check higher, as unit parsing has to happen first.
elif not self.is_valid_varname(item.target.id, item):
pass
elif len(self._defs):
raise StructureException(
"Global variables must all come before function definitions", item
)
# If the type declaration is of the form public(<type here>), then proceed with
# the underlying type but also add getters
elif isinstance(item.annotation, ast.Call) and item.annotation.func.id == "address":
if item.annotation.args[0].id not in premade_contracts:
raise VariableDeclarationException(
"Unsupported premade contract declaration", item.annotation.args[0]
)
premade_contract = premade_contracts[item.annotation.args[0].id]
self._contracts[item.target.id] = self.make_contract(premade_contract.body)
self._globals[item.target.id] = VariableRecord(
item.target.id, len(self._globals), BaseType("address"), True
)
elif item_name in self._contracts:
self._globals[item.target.id] = ContractRecord(
item.target.id, len(self._globals), ContractType(item_name), True
)
if item_attributes["public"]:
typ = ContractType(item_name)
for getter in self.mk_getter(item.target.id, typ):
self._getters.append(self.parse_line("\n" * (item.lineno - 1) + getter))
self._getters[-1].pos = getpos(item)
elif isinstance(item.annotation, ast.Call) and item.annotation.func.id == "public":
if (
isinstance(item.annotation.args[0], ast.Name)
and item_name in self._contracts
):
typ = ContractType(item_name)
else:
typ = parse_type(
item.annotation.args[0],
"storage",
custom_units=self._custom_units,
custom_structs=self._structs,
constants=self._constants,
)
self._globals[item.target.id] = VariableRecord(
item.target.id, len(self._globals), typ, True
)
# Adding getters here
for getter in self.mk_getter(item.target.id, typ):
self._getters.append(self.parse_line("\n" * (item.lineno - 1) + getter))
self._getters[-1].pos = getpos(item)
else:
self._globals[item.target.id] = VariableRecord(
item.target.id,
len(self._globals),
parse_type(
item.annotation,
"storage",
custom_units=self._custom_units,
custom_structs=self._structs,
constants=self._constants,
),
True,
)
|
https://github.com/vyperlang/vyper/issues/1188
|
Error compiling: 5.txt
Traceback (most recent call last):
File "/Users/mate/ethermat/security/vyper-fuzz/venv/bin/vyper", line 4, in <module>
__import__('pkg_resources').run_script('vyper==0.1.0b6', 'vyper')
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/setuptools-39.1.0-py3.6.egg/pkg_resources/__init__.py", line 658, in run_script
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/setuptools-39.1.0-py3.6.egg/pkg_resources/__init__.py", line 1445, in run_script
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 85, in <module>
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 108, in compile_codes
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 51, in exc_handler
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 105, in compile_codes
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 87, in <lambda>
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 8, in __compile
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/parser.py", line 229, in parse_tree_to_lll
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/global_context.py", line 75, in get_global_context
File "/Users/mate/ethermat/security/vyper-fuzz/venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/global_context.py", line 234, in add_globals_and_events
AttributeError: 'Attribute' object has no attribute 'id'
|
AttributeError
|
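Here the repeated `isinstance(item.annotation, ast.Call) and item.annotation.func.id == ...` tests are replaced with a get_call_func_name(item) helper. The helper's body is not shown in the record, so the following reconstruction is an assumption about the checks it must perform:

import ast

def get_call_func_name(item):
    # Assumed shape of the helper: return the annotation's call name only
    # when the called object is a plain ast.Name, else None.
    annotation = item.annotation
    if isinstance(annotation, ast.Call) and isinstance(annotation.func, ast.Name):
        return annotation.func.id
    return None

print(get_call_func_name(ast.parse("x: constant(int128)").body[0]))  # 'constant'
print(get_call_func_name(ast.parse("x: a.b(int128)").body[0]))       # None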
def from_declaration(cls, code, global_ctx):
name = code.target.id
pos = 0
check_valid_varname(
name,
global_ctx._custom_units,
global_ctx._structs,
global_ctx._constants,
pos=code,
error_prefix="Event name invalid. ",
exc=EventDeclarationException,
)
# Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ...
args = []
indexed_list = []
topics_count = 1
if code.annotation.args:
keys = code.annotation.args[0].keys
values = code.annotation.args[0].values
for i in range(len(keys)):
typ = values[i]
if not isinstance(keys[i], ast.Name):
raise EventDeclarationException(
"Invalid key type, expected a valid name.", keys[i]
)
arg = keys[i].id
arg_item = keys[i]
is_indexed = False
# Check to see if argument is a topic
if isinstance(typ, ast.Call) and typ.func.id == "indexed":
typ = values[i].args[0]
indexed_list.append(True)
topics_count += 1
is_indexed = True
else:
indexed_list.append(False)
if (
isinstance(typ, ast.Subscript)
and getattr(typ.value, "id", None) == "bytes"
and typ.slice.value.n > 32
and is_indexed
):
raise EventDeclarationException(
"Indexed arguments are limited to 32 bytes"
)
if topics_count > 4:
raise EventDeclarationException(
"Maximum of 3 topics {} given".format(topics_count - 1), arg
)
if not isinstance(arg, str):
raise VariableDeclarationException("Argument name invalid", arg)
if not typ:
raise InvalidTypeException("Argument must have type", arg)
check_valid_varname(
arg,
global_ctx._custom_units,
global_ctx._structs,
global_ctx._constants,
pos=arg_item,
error_prefix="Event argument name invalid or reserved.",
)
if arg in (x.name for x in args):
raise VariableDeclarationException(
"Duplicate function argument name: " + arg, arg_item
)
# Can struct be logged?
parsed_type = global_ctx.parse_type(typ, None)
args.append(VariableRecord(arg, pos, parsed_type, False))
if isinstance(parsed_type, ByteArrayType):
pos += ceil32(typ.slice.value.n)
else:
pos += get_size_of_type(parsed_type) * 32
sig = (
name
+ "("
+ ",".join(
[
canonicalize_type(arg.typ, indexed_list[pos])
for pos, arg in enumerate(args)
]
)
+ ")"
) # noqa F812
event_id = bytes_to_int(sha3(bytes(sig, "utf-8")))
return cls(name, args, indexed_list, event_id, sig)
|
def from_declaration(cls, code, global_ctx):
name = code.target.id
pos = 0
check_valid_varname(
name,
global_ctx._custom_units,
global_ctx._structs,
global_ctx._constants,
pos=code,
error_prefix="Event name invalid. ",
exc=EventDeclarationException,
)
# Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ...
args = []
indexed_list = []
topics_count = 1
if code.annotation.args:
keys = code.annotation.args[0].keys
values = code.annotation.args[0].values
for i in range(len(keys)):
typ = values[i]
arg = keys[i].id
arg_item = keys[i]
is_indexed = False
# Check to see if argument is a topic
if isinstance(typ, ast.Call) and typ.func.id == "indexed":
typ = values[i].args[0]
indexed_list.append(True)
topics_count += 1
is_indexed = True
else:
indexed_list.append(False)
if (
isinstance(typ, ast.Subscript)
and getattr(typ.value, "id", None) == "bytes"
and typ.slice.value.n > 32
and is_indexed
):
raise EventDeclarationException(
"Indexed arguments are limited to 32 bytes"
)
if topics_count > 4:
raise EventDeclarationException(
"Maximum of 3 topics {} given".format(topics_count - 1), arg
)
if not isinstance(arg, str):
raise VariableDeclarationException("Argument name invalid", arg)
if not typ:
raise InvalidTypeException("Argument must have type", arg)
check_valid_varname(
arg,
global_ctx._custom_units,
global_ctx._structs,
global_ctx._constants,
pos=arg_item,
error_prefix="Event argument name invalid or reserved.",
)
if arg in (x.name for x in args):
raise VariableDeclarationException(
"Duplicate function argument name: " + arg, arg_item
)
# Can struct be logged?
parsed_type = global_ctx.parse_type(typ, None)
args.append(VariableRecord(arg, pos, parsed_type, False))
if isinstance(parsed_type, ByteArrayType):
pos += ceil32(typ.slice.value.n)
else:
pos += get_size_of_type(parsed_type) * 32
sig = (
name
+ "("
+ ",".join(
[
canonicalize_type(arg.typ, indexed_list[pos])
for pos, arg in enumerate(args)
]
)
+ ")"
) # noqa F812
event_id = bytes_to_int(sha3(bytes(sig, "utf-8")))
return cls(name, args, indexed_list, event_id, sig)
|
https://github.com/vyperlang/vyper/issues/1189
|
Error compiling: 6.txt
Traceback (most recent call last):
File "/Users/mate/github/vyper/vyper-venv/bin/vyper", line 4, in <module>
__import__('pkg_resources').run_script('vyper==0.1.0b6', 'vyper')
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 664, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/pkg_resources/__init__.py", line 1451, in run_script
exec(script_code, namespace, namespace)
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 85, in <module>
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 108, in compile_codes
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/EGG-INFO/scripts/vyper", line 51, in exc_handler
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 105, in compile_codes
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 87, in <lambda>
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/compiler.py", line 8, in __compile
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/parser.py", line 249, in parse_tree_to_lll
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/parser/parser.py", line 174, in parse_events
File "/Users/mate/github/vyper/vyper-venv/lib/python3.6/site-packages/vyper-0.1.0b6-py3.6.egg/vyper/signatures/event_signature.py", line 54, in from_declaration
AttributeError: 'BinOp' object has no attribute 'id'
|
AttributeError
|
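The fix validates each event-argument key before reading keys[i].id; a key expression such as 1 + 1 parses as ast.BinOp, which is exactly what this traceback tripped over. A minimal sketch (toy exception class):

import ast

class EventDeclarationException(Exception):  # stand-in for vyper's exception type
    pass

def event_arg_name(key):
    # Event argument names must be plain identifiers.
    if not isinstance(key, ast.Name):
        raise EventDeclarationException("Invalid key type, expected a valid name.")
    return key.id

key = ast.parse("{1 + 1: indexed(bytes32)}").body[0].value.keys[0]  # ast.BinOp
try:
    event_arg_name(key)
except EventDeclarationException as e:
    print(e)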
def base_type_conversion(orig, frm, to, pos):
orig = unwrap_location(orig)
if (
getattr(frm, "is_literal", False)
and frm.typ in ("int128", "uint256")
and not SizeLimits.in_bounds(frm.typ, orig.value)
):
raise InvalidLiteralException("Number out of range: " + str(orig.value), pos)
# # Valid bytes[32] to bytes32 assignment.
# if isinstance(to, BaseType) and to.typ = 'bytes32' and isinstance(frm, ByteArrayType) and frm.maxlen == 32:
# return LLLnode(orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate)
if not isinstance(frm, (BaseType, NullType)) or not isinstance(to, BaseType):
raise TypeMismatchException(
"Base type conversion from or to non-base type: %r %r" % (frm, to), pos
)
elif is_base_type(frm, to.typ) and are_units_compatible(frm, to):
return LLLnode(
orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate
)
elif (
is_base_type(frm, "int128")
and is_base_type(to, "decimal")
and are_units_compatible(frm, to)
):
return LLLnode.from_list(
["mul", orig, DECIMAL_DIVISOR],
typ=BaseType("decimal", to.unit, to.positional),
)
elif isinstance(frm, NullType):
if to.typ not in ("int128", "bool", "uint256", "address", "bytes32", "decimal"):
# This is only to future proof the use of base_type_conversion.
raise TypeMismatchException(
"Cannot convert null-type object to type %r" % to, pos
) # pragma: no cover
return LLLnode.from_list(0, typ=to)
elif isinstance(to, ContractType) and frm.typ == "address":
return LLLnode(
orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate
)
# Integer literal conversion.
elif (frm.typ, to.typ, frm.is_literal) == ("int128", "uint256", True):
return LLLnode(
orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate
)
else:
raise TypeMismatchException(
"Typecasting from base type %r to %r unavailable" % (frm, to), pos
)
|
def base_type_conversion(orig, frm, to, pos):
orig = unwrap_location(orig)
if (
getattr(frm, "is_literal", False)
and frm.typ in ("int128", "uint256")
and not SizeLimits.in_bounds(frm.typ, orig.value)
):
raise InvalidLiteralException("Number out of range: " + str(orig.value), pos)
if not isinstance(frm, (BaseType, NullType)) or not isinstance(to, BaseType):
raise TypeMismatchException(
"Base type conversion from or to non-base type: %r %r" % (frm, to), pos
)
elif is_base_type(frm, to.typ) and are_units_compatible(frm, to):
return LLLnode(
orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate
)
elif (
is_base_type(frm, "int128")
and is_base_type(to, "decimal")
and are_units_compatible(frm, to)
):
return LLLnode.from_list(
["mul", orig, DECIMAL_DIVISOR],
typ=BaseType("decimal", to.unit, to.positional),
)
elif isinstance(frm, NullType):
if to.typ not in ("int128", "bool", "uint256", "address", "bytes32", "decimal"):
# This is only to future proof the use of base_type_conversion.
raise TypeMismatchException(
"Cannot convert null-type object to type %r" % to, pos
) # pragma: no cover
return LLLnode.from_list(0, typ=to)
elif isinstance(to, ContractType) and frm.typ == "address":
return LLLnode(
orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate
)
# Integer literal conversion.
elif (frm.typ, to.typ, frm.is_literal) == ("int128", "uint256", True):
return LLLnode(
orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate
)
else:
raise TypeMismatchException(
"Typecasting from base type %r to %r unavailable" % (frm, to), pos
)
|
https://github.com/vyperlang/vyper/issues/1088
|
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 51879)
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 647, in process_request_thread
self.finish_request(request, client_address)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 357, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 717, in __init__
self.handle()
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/server.py", line 426, in handle
self.handle_one_request()
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/server.py", line 414, in handle_one_request
method()
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/EGG-INFO/scripts/vyper-serve", line 66, in do_POST
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/EGG-INFO/scripts/vyper-serve", line 89, in _compile
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/compiler.py", line 44, in mk_full_signature
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/compiler.py", line 27, in gas_estimate
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 794, in parse_to_lll
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 224, in parse_tree_to_lll
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 172, in parse_other_functions
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 530, in parse_func
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 530, in <listcomp>
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 547, in parse_body
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 601, in parse_stmt
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 76, in __init__
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 150, in ann_assign
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 105, in _check_valid_assign
AttributeError: 'ByteArrayType' object has no attribute 'typ'
----------------------------------------
|
AttributeError
|
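
The conversion chain above dispatches on the (source type, target type, is-literal) triple, with the added branches admitting address-to-contract casts and literal int128-to-uint256 casts. A minimal, self-contained sketch of that dispatch pattern, using plain strings and a hypothetical convert() helper instead of the compiler's LLLnode machinery:

class TypeMismatch(Exception):
    pass

def convert(value, frm, to, is_literal=False):
    # Dispatch on the (from, to, literal?) triple, mirroring the shape
    # of the conversion chain above; types are plain strings here.
    if (frm, to, is_literal) == ("int128", "uint256", True):
        if not 0 <= value < 2 ** 256:
            raise TypeMismatch("literal out of uint256 range")
        return value  # same value, reinterpreted type
    if (frm, to) == ("address", "contract"):
        return value  # an address can be reinterpreted as a contract reference
    raise TypeMismatch("Typecasting from base type %r to %r unavailable" % (frm, to))

print(convert(42, "int128", "uint256", is_literal=True))  # 42

|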
def _check_valid_assign(self, sub):
if isinstance(self.stmt.annotation, ast.Call): # unit style: num(wei)
if self.stmt.annotation.func.id != sub.typ.typ and not sub.typ.is_literal:
raise TypeMismatchException(
"Invalid type, expected: %s" % self.stmt.annotation.func.id, self.stmt
)
elif (
isinstance(self.stmt.annotation, ast.Name)
and self.stmt.annotation.id == "bytes32"
):
if isinstance(sub.typ, ByteArrayType):
if sub.typ.maxlen != 32:
raise TypeMismatchException(
"Invalid type, expected: bytes32. String is incorrect length.",
self.stmt,
)
return
elif isinstance(sub.typ, BaseType):
if sub.typ.typ != "bytes32":
raise TypeMismatchException(
"Invalid type, expected: bytes32", self.stmt
)
return
else:
raise TypeMismatchException("Invalid type, expected: bytes32", self.stmt)
elif isinstance(self.stmt.annotation, ast.Dict):
if not isinstance(sub.typ, StructType):
raise TypeMismatchException("Invalid type, expected a struct")
elif isinstance(self.stmt.annotation, ast.Subscript):
if not isinstance(sub.typ, (ListType, ByteArrayType)): # check list assign.
raise TypeMismatchException(
"Invalid type, expected: %s" % self.stmt.annotation.value.id, self.stmt
)
    # Check that the integer literal can be assigned to uint256 if necessary.
elif (self.stmt.annotation.id, sub.typ.typ) == (
"uint256",
"int128",
) and sub.typ.is_literal:
if not SizeLimits.in_bounds("uint256", sub.value):
raise InvalidLiteralException(
"Invalid uint256 assignment, value not in uint256 range.", self.stmt
)
elif self.stmt.annotation.id != sub.typ.typ and not sub.typ.unit:
raise TypeMismatchException(
"Invalid type, expected: %s" % self.stmt.annotation.id, self.stmt
)
|
def _check_valid_assign(self, sub):
if isinstance(self.stmt.annotation, ast.Call): # unit style: num(wei)
if self.stmt.annotation.func.id != sub.typ.typ and not sub.typ.is_literal:
raise TypeMismatchException(
"Invalid type, expected: %s" % self.stmt.annotation.func.id, self.stmt
)
elif isinstance(self.stmt.annotation, ast.Dict):
if not isinstance(sub.typ, StructType):
raise TypeMismatchException("Invalid type, expected a struct")
elif isinstance(self.stmt.annotation, ast.Subscript):
if not isinstance(sub.typ, (ListType, ByteArrayType)): # check list assign.
raise TypeMismatchException(
"Invalid type, expected: %s" % self.stmt.annotation.value.id, self.stmt
)
    # Check that the integer literal can be assigned to uint256 if necessary.
elif (self.stmt.annotation.id, sub.typ.typ) == (
"uint256",
"int128",
) and sub.typ.is_literal:
if not SizeLimits.in_bounds("uint256", sub.value):
raise InvalidLiteralException(
"Invalid uint256 assignment, value not in uint256 range.", self.stmt
)
elif self.stmt.annotation.id != sub.typ.typ and not sub.typ.unit:
raise TypeMismatchException(
"Invalid type, expected: %s" % self.stmt.annotation.id, self.stmt
)
|
https://github.com/vyperlang/vyper/issues/1088
|
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 51879)
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 647, in process_request_thread
self.finish_request(request, client_address)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 357, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 717, in __init__
self.handle()
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/server.py", line 426, in handle
self.handle_one_request()
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/server.py", line 414, in handle_one_request
method()
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/EGG-INFO/scripts/vyper-serve", line 66, in do_POST
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/EGG-INFO/scripts/vyper-serve", line 89, in _compile
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/compiler.py", line 44, in mk_full_signature
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/compiler.py", line 27, in gas_estimate
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 794, in parse_to_lll
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 224, in parse_tree_to_lll
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 172, in parse_other_functions
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 530, in parse_func
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 530, in <listcomp>
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 547, in parse_body
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 601, in parse_stmt
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 76, in __init__
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 150, in ann_assign
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 105, in _check_valid_assign
AttributeError: 'ByteArrayType' object has no attribute 'typ'
----------------------------------------
|
AttributeError
|
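
The AttributeError above comes from reading .typ off a ByteArrayType, an attribute only BaseType carries; the fixed _check_valid_assign branches on the concrete type before touching it. A runnable sketch of that guard, with hypothetical stand-ins for the compiler's type objects:

class ByteArrayType:
    def __init__(self, maxlen):
        self.maxlen = maxlen  # no .typ attribute, exactly as in the traceback

class BaseType:
    def __init__(self, typ):
        self.typ = typ

def check_bytes32_assign(sub_typ):
    # Branch on the concrete type first, as the fix does.
    if isinstance(sub_typ, ByteArrayType):
        if sub_typ.maxlen != 32:
            raise TypeError("expected bytes32: string is the wrong length")
    elif isinstance(sub_typ, BaseType):
        if sub_typ.typ != "bytes32":
            raise TypeError("expected bytes32")
    else:
        raise TypeError("expected bytes32")

check_bytes32_assign(ByteArrayType(32))    # ok: a 32-byte string literal
check_bytes32_assign(BaseType("bytes32"))  # ok: already a bytes32 base type

|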
def ann_assign(self):
self.context.set_in_assignment(True)
typ = parse_type(
self.stmt.annotation, location="memory", custom_units=self.context.custom_units
)
if (
isinstance(self.stmt.target, ast.Attribute)
and self.stmt.target.value.id == "self"
):
raise TypeMismatchException("May not redefine storage variables.", self.stmt)
varname = self.stmt.target.id
pos = self.context.new_variable(varname, typ)
o = LLLnode.from_list("pass", typ=None, pos=pos)
if self.stmt.value is not None:
sub = Expr(self.stmt.value, self.context).lll_node
        # If assigning a bytes[32] literal to a bytes32 target, rewrite sub as bytes32.
if (
isinstance(sub.typ, ByteArrayType)
and sub.typ.maxlen == 32
and isinstance(typ, BaseType)
and typ.typ == "bytes32"
):
bytez, bytez_length = string_to_bytes(self.stmt.value.s)
sub = LLLnode(
bytes_to_int(bytez), typ=BaseType("bytes32"), pos=getpos(self.stmt)
)
self._check_valid_assign(sub)
self._check_same_variable_assign(sub)
variable_loc = LLLnode.from_list(
pos, typ=typ, location="memory", pos=getpos(self.stmt)
)
o = make_setter(variable_loc, sub, "memory", pos=getpos(self.stmt))
self.context.set_in_assignment(False)
return o
|
def ann_assign(self):
self.context.set_in_assignment(True)
typ = parse_type(
self.stmt.annotation, location="memory", custom_units=self.context.custom_units
)
if (
isinstance(self.stmt.target, ast.Attribute)
and self.stmt.target.value.id == "self"
):
raise TypeMismatchException("May not redefine storage variables.", self.stmt)
varname = self.stmt.target.id
pos = self.context.new_variable(varname, typ)
o = LLLnode.from_list("pass", typ=None, pos=pos)
if self.stmt.value is not None:
sub = Expr(self.stmt.value, self.context).lll_node
self._check_valid_assign(sub)
self._check_same_variable_assign(sub)
variable_loc = LLLnode.from_list(
pos, typ=typ, location="memory", pos=getpos(self.stmt)
)
o = make_setter(variable_loc, sub, "memory", pos=getpos(self.stmt))
self.context.set_in_assignment(False)
return o
|
https://github.com/vyperlang/vyper/issues/1088
|
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 51879)
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 647, in process_request_thread
self.finish_request(request, client_address)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 357, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socketserver.py", line 717, in __init__
self.handle()
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/server.py", line 426, in handle
self.handle_one_request()
File "/usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/server.py", line 414, in handle_one_request
method()
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/EGG-INFO/scripts/vyper-serve", line 66, in do_POST
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/EGG-INFO/scripts/vyper-serve", line 89, in _compile
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/compiler.py", line 44, in mk_full_signature
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/compiler.py", line 27, in gas_estimate
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 794, in parse_to_lll
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 224, in parse_tree_to_lll
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 172, in parse_other_functions
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 530, in parse_func
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 530, in <listcomp>
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 547, in parse_body
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/parser.py", line 601, in parse_stmt
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 76, in __init__
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 150, in ann_assign
File "/Users/ryuya.nakamura/work/vyper/venv/lib/python3.7/site-packages/vyper-0.1.0b4-py3.7.egg/vyper/parser/stmt.py", line 105, in _check_valid_assign
AttributeError: 'ByteArrayType' object has no attribute 'typ'
----------------------------------------
|
AttributeError
|
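
The fixed ann_assign additionally rewrites a bytes[32] string literal into a bytes32 integer constant before validation, so the checker never sees a ByteArrayType where a base type was annotated. A sketch of that re-encoding step (the helper names mirror the ones used above, but the bodies and the right-padding are assumptions):

def string_to_bytes(s):
    b = s.encode("utf-8")
    if len(b) > 32:
        raise ValueError("string too long for bytes32")
    return b, len(b)

def bytes_to_int(b):
    # Pad to 32 bytes and read as one big-endian 256-bit integer.
    return int.from_bytes(b.ljust(32, b"\x00"), "big")

bytez, bytez_length = string_to_bytes("test")
print(hex(bytes_to_int(bytez)))  # 0x74657374 followed by 56 zero nibbles

|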
def parse_body(code, context):
if not isinstance(code, list):
return parse_stmt(code, context)
o = []
for stmt in code:
lll = parse_stmt(stmt, context)
o.append(lll)
return LLLnode.from_list(["seq"] + o, pos=getpos(code[0]) if code else None)
|
def parse_body(code, context):
if not isinstance(code, list):
return parse_stmt(code, context)
o = []
for stmt in code:
o.append(parse_stmt(stmt, context))
return LLLnode.from_list(["seq"] + o, pos=getpos(code[0]) if code else None)
|
https://github.com/vyperlang/vyper/issues/918
|
Traceback (most recent call last):
File "vyper-bin", line 56, in <module>
print(optimizer.optimize(parse_to_lll(code)))
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 991, in parse_to_lll
return parse_tree_to_lll(code, kode)
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 464, in parse_tree_to_lll
o, otherfuncs, _globals, sigs, external_contracts, origcode, _custom_units, defaultfunc, runtime_only
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 417, in parse_other_functions
sub.append(parse_func(_def, _globals, {**{'self': sigs}, **external_contracts}, origcode, _custom_units)) # noqa E999
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 554, in parse_func
['seq'] + clampers + [parse_body(c, context) for c in code.body] + ['stop']
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 554, in <listcomp>
['seq'] + clampers + [parse_body(c, context) for c in code.body] + ['stop']
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 572, in parse_body
return parse_stmt(code, context)
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 760, in parse_stmt
return Stmt(stmt, context).lll_node
File "/home/jacques/projects/vyper/vyper/parser/stmt.py", line 68, in __init__
self.lll_node = self.stmt_table[stmt_type]()
File "/home/jacques/projects/vyper/vyper/parser/stmt.py", line 75, in expr
return Stmt(self.stmt.value, self.context).lll_node
File "/home/jacques/projects/vyper/vyper/parser/stmt.py", line 72, in __init__
raise StructureException("Unsupported statement type: %s" % type(stmt), stmt)
vyper.exceptions.StructureException: line 5: Unsupported statement type: <class '_ast.Str'>
|
vyper.exceptions.StructureException
|
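
The parse_body change is a readability refactor: the lowered statement gets its own name before being appended, which is friendlier to debuggers and tracing. A toy version of the same pattern, lowering each statement and wrapping the results in a single "seq" node:

def lower_stmt(stmt):
    return ["stmt", stmt]  # hypothetical per-statement lowering

def lower_body(body):
    if not isinstance(body, list):
        return lower_stmt(body)
    o = []
    for stmt in body:
        lowered = lower_stmt(stmt)  # named, so it can be inspected mid-loop
        o.append(lowered)
    return ["seq"] + o

print(lower_body(["a = 1", "return a"]))
# ['seq', ['stmt', 'a = 1'], ['stmt', 'return a']]

|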
def __init__(self, stmt, context):
self.stmt = stmt
self.context = context
self.stmt_table = {
ast.Expr: self.expr,
ast.Pass: self.parse_pass,
ast.AnnAssign: self.ann_assign,
ast.Assign: self.assign,
ast.If: self.parse_if,
ast.Call: self.call,
ast.Assert: self.parse_assert,
ast.For: self.parse_for,
ast.AugAssign: self.aug_assign,
ast.Break: self.parse_break,
ast.Continue: self.parse_continue,
ast.Return: self.parse_return,
ast.Delete: self.parse_delete,
ast.Str: self.parse_docblock, # docblock
}
stmt_type = self.stmt.__class__
if stmt_type in self.stmt_table:
lll_node = self.stmt_table[stmt_type]()
self.lll_node = lll_node
elif isinstance(stmt, ast.Name) and stmt.id == "throw":
self.lll_node = LLLnode.from_list(["assert", 0], typ=None, pos=getpos(stmt))
else:
raise StructureException("Unsupported statement type: %s" % type(stmt), stmt)
|
def __init__(self, stmt, context):
self.stmt = stmt
self.context = context
self.stmt_table = {
ast.Expr: self.expr,
ast.Pass: self.parse_pass,
ast.AnnAssign: self.ann_assign,
ast.Assign: self.assign,
ast.If: self.parse_if,
ast.Call: self.call,
ast.Assert: self.parse_assert,
ast.For: self.parse_for,
ast.AugAssign: self.aug_assign,
ast.Break: self.parse_break,
ast.Continue: self.parse_continue,
ast.Return: self.parse_return,
ast.Delete: self.parse_delete,
}
stmt_type = self.stmt.__class__
if stmt_type in self.stmt_table:
self.lll_node = self.stmt_table[stmt_type]()
elif isinstance(stmt, ast.Name) and stmt.id == "throw":
self.lll_node = LLLnode.from_list(["assert", 0], typ=None, pos=getpos(stmt))
else:
raise StructureException("Unsupported statement type: %s" % type(stmt), stmt)
|
https://github.com/vyperlang/vyper/issues/918
|
Traceback (most recent call last):
File "vyper-bin", line 56, in <module>
print(optimizer.optimize(parse_to_lll(code)))
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 991, in parse_to_lll
return parse_tree_to_lll(code, kode)
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 464, in parse_tree_to_lll
o, otherfuncs, _globals, sigs, external_contracts, origcode, _custom_units, defaultfunc, runtime_only
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 417, in parse_other_functions
sub.append(parse_func(_def, _globals, {**{'self': sigs}, **external_contracts}, origcode, _custom_units)) # noqa E999
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 554, in parse_func
['seq'] + clampers + [parse_body(c, context) for c in code.body] + ['stop']
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 554, in <listcomp>
['seq'] + clampers + [parse_body(c, context) for c in code.body] + ['stop']
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 572, in parse_body
return parse_stmt(code, context)
File "/home/jacques/projects/vyper/vyper/parser/parser.py", line 760, in parse_stmt
return Stmt(stmt, context).lll_node
File "/home/jacques/projects/vyper/vyper/parser/stmt.py", line 68, in __init__
self.lll_node = self.stmt_table[stmt_type]()
File "/home/jacques/projects/vyper/vyper/parser/stmt.py", line 75, in expr
return Stmt(self.stmt.value, self.context).lll_node
File "/home/jacques/projects/vyper/vyper/parser/stmt.py", line 72, in __init__
raise StructureException("Unsupported statement type: %s" % type(stmt), stmt)
vyper.exceptions.StructureException: line 5: Unsupported statement type: <class '_ast.Str'>
|
vyper.exceptions.StructureException
|
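
The StructureException above is triggered by a docstring: it parses as an ast.Expr wrapping a string node, Stmt.expr re-dispatches on that inner node, and the inner node class had no table entry until ast.Str was registered. A self-contained sketch of the dispatch-table fix; it targets Python 3.8+, where the inner node is ast.Constant rather than ast.Str:

import ast

def classify(node):
    table = {
        ast.Pass: "pass",
        ast.Return: "return",
        ast.Constant: "docblock",  # stands in for the added ast.Str entry
    }
    if isinstance(node, ast.Expr):
        return classify(node.value)  # unwrap, mirroring Stmt.expr
    handler = table.get(type(node))
    if handler is None:
        raise SyntaxError("Unsupported statement type: %s" % type(node))
    return handler

fn = ast.parse('def f():\n    """doc"""\n    pass').body[0]
print([classify(s) for s in fn.body])  # ['docblock', 'pass']

|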
def send_file(
filename_or_fp,
mimetype=None,
as_attachment=False,
attachment_filename=None,
add_etags=True,
cache_timeout=None,
conditional=False,
last_modified=None,
):
"""Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support. Alternatively
you can set the application's :attr:`~Flask.use_x_sendfile` attribute
to ``True`` to directly emit an ``X-Sendfile`` header. This however
requires support of the underlying webserver for ``X-Sendfile``.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
guessing requires a `filename` or an `attachment_filename` to be
provided.
ETags will also be attached automatically if a `filename` is provided. You
can turn this off by setting `add_etags=False`.
If `conditional=True` and `filename` is provided, this method will try to
upgrade the response stream to support range requests. This will allow
the request to be answered with partial content response.
Please never pass filenames to this function from user sources;
you should use :func:`send_from_directory` instead.
.. versionadded:: 0.2
.. versionadded:: 0.5
The `add_etags`, `cache_timeout` and `conditional` parameters were
added. The default behavior is now to attach etags.
.. versionchanged:: 0.7
mimetype guessing and etag support for file objects was
deprecated because it was unreliable. Pass a filename if you are
able to, otherwise attach an etag yourself. This functionality
will be removed in Flask 1.0
.. versionchanged:: 0.9
cache_timeout pulls its default from application config, when None.
.. versionchanged:: 0.12
The filename is no longer automatically inferred from file objects. If
you want to use automatic mimetype and etag support, pass a filepath via
`filename_or_fp` or `attachment_filename`.
.. versionchanged:: 0.12
The `attachment_filename` is preferred over `filename` for MIME-type
detection.
:param filename_or_fp: the filename of the file to send in `latin-1`.
This is relative to the :attr:`~Flask.root_path`
if a relative path is specified.
Alternatively a file object might be provided in
which case ``X-Sendfile`` might not work and fall
back to the traditional method. Make sure that the
file pointer is positioned at the start of data to
send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided. If a file path is
given, auto detection happens as fallback, otherwise an
error will be raised.
:param as_attachment: set to ``True`` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param attachment_filename: the filename for the attachment if it
differs from the file's filename.
:param add_etags: set to ``False`` to disable attaching of etags.
:param conditional: set to ``True`` to enable conditional responses.
:param cache_timeout: the timeout in seconds for the headers. When ``None``
(default), this value is set by
:meth:`~Flask.get_send_file_max_age` of
:data:`~flask.current_app`.
:param last_modified: set the ``Last-Modified`` header to this value,
a :class:`~datetime.datetime` or timestamp.
If a file was passed, this overrides its mtime.
"""
mtime = None
fsize = None
if isinstance(filename_or_fp, string_types):
filename = filename_or_fp
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
file = None
if attachment_filename is None:
attachment_filename = os.path.basename(filename)
else:
file = filename_or_fp
filename = None
if mimetype is None:
if attachment_filename is not None:
mimetype = (
mimetypes.guess_type(attachment_filename)[0]
or "application/octet-stream"
)
if mimetype is None:
raise ValueError(
"Unable to infer MIME-type because no filename is available. "
"Please set either `attachment_filename`, pass a filepath to "
"`filename_or_fp` or set your own MIME-type via `mimetype`."
)
headers = Headers()
if as_attachment:
if attachment_filename is None:
raise TypeError("filename unavailable, required for sending as attachment")
headers.add("Content-Disposition", "attachment", filename=attachment_filename)
if current_app.use_x_sendfile and filename:
if file is not None:
file.close()
headers["X-Sendfile"] = filename
fsize = os.path.getsize(filename)
headers["Content-Length"] = fsize
data = None
else:
if file is None:
file = open(filename, "rb")
mtime = os.path.getmtime(filename)
fsize = os.path.getsize(filename)
headers["Content-Length"] = fsize
data = wrap_file(request.environ, file)
rv = current_app.response_class(
data, mimetype=mimetype, headers=headers, direct_passthrough=True
)
if last_modified is not None:
rv.last_modified = last_modified
elif mtime is not None:
rv.last_modified = mtime
rv.cache_control.public = True
if cache_timeout is None:
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if add_etags and filename is not None:
from warnings import warn
try:
rv.set_etag(
"%s-%s-%s"
% (
os.path.getmtime(filename),
os.path.getsize(filename),
adler32(
filename.encode("utf-8")
if isinstance(filename, text_type)
else filename
)
& 0xFFFFFFFF,
)
)
except OSError:
warn(
"Access %s failed, maybe it does not exist, so ignore etags in "
"headers" % filename,
stacklevel=2,
)
if conditional:
if callable(getattr(Range, "to_content_range_header", None)):
# Werkzeug supports Range Requests
# Remove this test when support for Werkzeug <0.12 is dropped
try:
rv = rv.make_conditional(
request, accept_ranges=True, complete_length=fsize
)
except RequestedRangeNotSatisfiable:
if file is not None:
file.close()
raise
else:
rv = rv.make_conditional(request)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop("x-sendfile", None)
return rv
|
def send_file(
filename_or_fp,
mimetype=None,
as_attachment=False,
attachment_filename=None,
add_etags=True,
cache_timeout=None,
conditional=False,
last_modified=None,
):
"""Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support. Alternatively
you can set the application's :attr:`~Flask.use_x_sendfile` attribute
to ``True`` to directly emit an ``X-Sendfile`` header. This however
requires support of the underlying webserver for ``X-Sendfile``.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
guessing requires a `filename` or an `attachment_filename` to be
provided.
ETags will also be attached automatically if a `filename` is provided. You
can turn this off by setting `add_etags=False`.
If `conditional=True` and `filename` is provided, this method will try to
upgrade the response stream to support range requests. This will allow
the request to be answered with partial content response.
Please never pass filenames to this function from user sources;
you should use :func:`send_from_directory` instead.
.. versionadded:: 0.2
.. versionadded:: 0.5
The `add_etags`, `cache_timeout` and `conditional` parameters were
added. The default behavior is now to attach etags.
.. versionchanged:: 0.7
mimetype guessing and etag support for file objects was
deprecated because it was unreliable. Pass a filename if you are
able to, otherwise attach an etag yourself. This functionality
will be removed in Flask 1.0
.. versionchanged:: 0.9
cache_timeout pulls its default from application config, when None.
.. versionchanged:: 0.12
The filename is no longer automatically inferred from file objects. If
you want to use automatic mimetype and etag support, pass a filepath via
`filename_or_fp` or `attachment_filename`.
.. versionchanged:: 0.12
The `attachment_filename` is preferred over `filename` for MIME-type
detection.
:param filename_or_fp: the filename of the file to send in `latin-1`.
This is relative to the :attr:`~Flask.root_path`
if a relative path is specified.
Alternatively a file object might be provided in
which case ``X-Sendfile`` might not work and fall
back to the traditional method. Make sure that the
file pointer is positioned at the start of data to
send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided. If a file path is
given, auto detection happens as fallback, otherwise an
error will be raised.
:param as_attachment: set to ``True`` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param attachment_filename: the filename for the attachment if it
differs from the file's filename.
:param add_etags: set to ``False`` to disable attaching of etags.
:param conditional: set to ``True`` to enable conditional responses.
:param cache_timeout: the timeout in seconds for the headers. When ``None``
(default), this value is set by
:meth:`~Flask.get_send_file_max_age` of
:data:`~flask.current_app`.
:param last_modified: set the ``Last-Modified`` header to this value,
a :class:`~datetime.datetime` or timestamp.
If a file was passed, this overrides its mtime.
"""
mtime = None
fsize = None
if isinstance(filename_or_fp, string_types):
filename = filename_or_fp
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
file = None
if attachment_filename is None:
attachment_filename = os.path.basename(filename)
else:
file = filename_or_fp
filename = None
if mimetype is None:
if attachment_filename is not None:
mimetype = (
mimetypes.guess_type(attachment_filename)[0]
or "application/octet-stream"
)
if mimetype is None:
raise ValueError(
"Unable to infer MIME-type because no filename is available. "
"Please set either `attachment_filename`, pass a filepath to "
"`filename_or_fp` or set your own MIME-type via `mimetype`."
)
headers = Headers()
if as_attachment:
if attachment_filename is None:
raise TypeError("filename unavailable, required for sending as attachment")
headers.add("Content-Disposition", "attachment", filename=attachment_filename)
if current_app.use_x_sendfile and filename:
if file is not None:
file.close()
headers["X-Sendfile"] = filename
fsize = os.path.getsize(filename)
headers["Content-Length"] = fsize
data = None
else:
if file is None:
file = open(filename, "rb")
mtime = os.path.getmtime(filename)
fsize = os.path.getsize(filename)
headers["Content-Length"] = fsize
data = wrap_file(request.environ, file)
rv = current_app.response_class(
data, mimetype=mimetype, headers=headers, direct_passthrough=True
)
if last_modified is not None:
rv.last_modified = last_modified
elif mtime is not None:
rv.last_modified = mtime
rv.cache_control.public = True
if cache_timeout is None:
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if add_etags and filename is not None:
from warnings import warn
try:
rv.set_etag(
"%s-%s-%s"
% (
os.path.getmtime(filename),
os.path.getsize(filename),
adler32(
filename.encode("utf-8")
if isinstance(filename, text_type)
else filename
)
& 0xFFFFFFFF,
)
)
except OSError:
warn(
"Access %s failed, maybe it does not exist, so ignore etags in "
"headers" % filename,
stacklevel=2,
)
if conditional:
if callable(getattr(Range, "to_content_range_header", None)):
# Werkzeug supports Range Requests
# Remove this test when support for Werkzeug <0.12 is dropped
try:
rv = rv.make_conditional(
request, accept_ranges=True, complete_length=fsize
)
except RequestedRangeNotSatisfiable:
file.close()
raise
else:
rv = rv.make_conditional(request)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop("x-sendfile", None)
return rv
|
https://github.com/pallets/flask/issues/2526
|
Traceback (most recent call last):
File "/home/adrian/dev/indico/env/lib/python2.7/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/home/adrian/dev/indico/env/lib/python2.7/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/adrian/dev/indico/env/lib/python2.7/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/adrian/dev/indico/env/lib/python2.7/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/home/adrian/dev/indico/env/lib/python2.7/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/adrian/dev/indico/src/flasktest.py", line 10, in index
return send_file('/etc/passwd', mimetype='text/plain', conditional=True)
File "/home/adrian/dev/indico/env/lib/python2.7/site-packages/flask/helpers.py", line 594, in send_file
file.close()
AttributeError: 'NoneType' object has no attribute 'close'
|
AttributeError
|
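
In send_file, `file` stays None whenever a plain path was passed (and on the X-Sendfile branch it is closed and never reopened), so the unconditional file.close() in the range-request error handler raised the AttributeError above. The fix is a plain None guard; distilled here into a runnable sketch with hypothetical stand-ins for the response machinery:

class RangeError(Exception):
    pass

def make_conditional(fsize):
    # Stand-in for rv.make_conditional(...) rejecting the requested range.
    raise RangeError("requested range not satisfiable")

def send(file, fsize):
    try:
        return make_conditional(fsize)
    except RangeError:
        if file is not None:  # the added guard
            file.close()
        raise

try:
    send(None, 1024)  # previously: AttributeError on None.close()
except RangeError as exc:
    print("propagated cleanly:", exc)

|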
def __init__(
self,
model: "Type[Model]",
db: "BaseDBAsyncClient",
prefetch_map=None,
prefetch_queries=None,
) -> None:
self.model = model
self.db: "BaseDBAsyncClient" = db
self.prefetch_map = prefetch_map if prefetch_map else {}
self._prefetch_queries = prefetch_queries if prefetch_queries else {}
key = f"{self.db.connection_name}:{self.model._meta.table}"
if key not in EXECUTOR_CACHE:
self.regular_columns, columns = self._prepare_insert_columns()
self.insert_query = self._prepare_insert_statement(columns)
self.column_map: Dict[str, Callable[[Any, Any], Any]] = {}
for column in self.regular_columns:
field_object = self.model._meta.fields_map[column]
if field_object.__class__ in self.TO_DB_OVERRIDE:
self.column_map[column] = partial(
self.TO_DB_OVERRIDE[field_object.__class__], field_object
)
else:
self.column_map[column] = field_object.to_db_value
table = Table(self.model._meta.table)
self.delete_query = str(
self.model._meta.basequery.where(
table[self.model._meta.db_pk_field] == self.Parameter(0)
).delete()
)
self.update_cache: Dict[str, str] = {}
EXECUTOR_CACHE[key] = (
self.regular_columns,
self.insert_query,
self.column_map,
self.delete_query,
self.update_cache,
)
else:
(
self.regular_columns,
self.insert_query,
self.column_map,
self.delete_query,
self.update_cache,
) = EXECUTOR_CACHE[key]
|
def __init__(
self,
model: "Type[Model]",
db: "BaseDBAsyncClient",
prefetch_map=None,
prefetch_queries=None,
) -> None:
self.model = model
self.db: "BaseDBAsyncClient" = db
self.prefetch_map = prefetch_map if prefetch_map else {}
self._prefetch_queries = prefetch_queries if prefetch_queries else {}
key = f"{self.db.connection_name}:{self.model._meta.table}"
if key not in EXECUTOR_CACHE:
self.regular_columns, columns = self._prepare_insert_columns()
self.insert_query = self._prepare_insert_statement(columns)
self.column_map: Dict[str, Callable[[Any, Any], Any]] = {}
for column in self.regular_columns:
field_object = self.model._meta.fields_map[column]
if field_object.__class__ in self.TO_DB_OVERRIDE:
self.column_map[column] = partial(
self.TO_DB_OVERRIDE[field_object.__class__], field_object
)
else:
self.column_map[column] = field_object.to_db_value
table = Table(self.model._meta.table)
self.delete_query = str(
self.model._meta.basequery.where(
getattr(table, self.model._meta.db_pk_field) == self.Parameter(0)
).delete()
)
self.update_cache: Dict[str, str] = {}
EXECUTOR_CACHE[key] = (
self.regular_columns,
self.insert_query,
self.column_map,
self.delete_query,
self.update_cache,
)
else:
(
self.regular_columns,
self.insert_query,
self.column_map,
self.delete_query,
self.update_cache,
) = EXECUTOR_CACHE[key]
|
https://github.com/tortoise/tortoise-orm/issues/233
|
Traceback (most recent call last):
File "/opt/project/src/server.py", line 54, in <module>
asyncio.run(main())
File "/usr/local/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 579, in run_until_complete
return future.result()
File "/opt/project/src/server.py", line 47, in main
async for item in ModelOne.all().prefetch_related('many_to_many'):
File "/usr/local/lib/python3.7/site-packages/tortoise/queryset.py", line 512, in __aiter__
for val in await self:
File "/usr/local/lib/python3.7/site-packages/tortoise/queryset.py", line 521, in _execute
).execute_select(self.query, custom_fields=list(self._annotations.keys()))
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 90, in execute_select
await self._execute_prefetch_queries(instance_list)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 327, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 320, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 232, in _prefetch_m2m_relation
*[getattr(related_query_table, field).as_(field) for field in related_query.fields],
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 232, in <listcomp>
*[getattr(related_query_table, field).as_(field) for field in related_query.fields],
AttributeError: 'NoneType' object has no attribute 'as_'
|
AttributeError
|
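
The fix swaps getattr(table, name) for table[name] when building column references. Attribute access only falls through to __getattr__ when normal lookup fails, so a column whose name collides with an existing attribute on the table wrapper silently yields that attribute (here None) instead of a column object, which is what later dies with 'NoneType' object has no attribute 'as_'. A tiny mimic of the failure mode; the classes below are illustrative, not pypika's:

class Field:
    def __init__(self, name):
        self.name = name

    def as_(self, alias):
        return "%s AS %s" % (self.name, alias)

class Table:
    _schema = None  # a pre-existing attribute that shadows a same-named column

    def __getattr__(self, name):
        return Field(name)  # consulted only when normal lookup fails

    def __getitem__(self, name):
        return Field(name)  # always builds a column reference

t = Table()
print(getattr(t, "_schema"))      # None -> calling .as_() on it would raise
print(t["_schema"].as_("alias"))  # _schema AS alias

|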
def get_update_sql(self, update_fields: Optional[List[str]]) -> str:
"""
Generates the SQL for updating a model depending on provided update_fields.
Result is cached for performance.
"""
key = ",".join(update_fields) if update_fields else ""
if key in self.update_cache:
return self.update_cache[key]
table = Table(self.model._meta.table)
query = self.db.query_class.update(table)
count = 0
for field in update_fields or self.model._meta.fields_db_projection.keys():
db_field = self.model._meta.fields_db_projection[field]
field_object = self.model._meta.fields_map[field]
if not field_object.pk:
query = query.set(db_field, self.Parameter(count))
count += 1
query = query.where(table[self.model._meta.db_pk_field] == self.Parameter(count))
sql = self.update_cache[key] = query.get_sql()
return sql
|
def get_update_sql(self, update_fields: Optional[List[str]]) -> str:
"""
Generates the SQL for updating a model depending on provided update_fields.
Result is cached for performance.
"""
key = ",".join(update_fields) if update_fields else ""
if key in self.update_cache:
return self.update_cache[key]
table = Table(self.model._meta.table)
query = self.db.query_class.update(table)
count = 0
for field in update_fields or self.model._meta.fields_db_projection.keys():
db_field = self.model._meta.fields_db_projection[field]
field_object = self.model._meta.fields_map[field]
if not field_object.pk:
query = query.set(db_field, self.Parameter(count))
count += 1
query = query.where(
getattr(table, self.model._meta.db_pk_field) == self.Parameter(count)
)
sql = self.update_cache[key] = query.get_sql()
return sql
|
https://github.com/tortoise/tortoise-orm/issues/233
|
Traceback (most recent call last):
File "/opt/project/src/server.py", line 54, in <module>
asyncio.run(main())
File "/usr/local/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 579, in run_until_complete
return future.result()
File "/opt/project/src/server.py", line 47, in main
async for item in ModelOne.all().prefetch_related('many_to_many'):
File "/usr/local/lib/python3.7/site-packages/tortoise/queryset.py", line 512, in __aiter__
for val in await self:
File "/usr/local/lib/python3.7/site-packages/tortoise/queryset.py", line 521, in _execute
).execute_select(self.query, custom_fields=list(self._annotations.keys()))
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 90, in execute_select
await self._execute_prefetch_queries(instance_list)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 327, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 320, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 232, in _prefetch_m2m_relation
*[getattr(related_query_table, field).as_(field) for field in related_query.fields],
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 232, in <listcomp>
*[getattr(related_query_table, field).as_(field) for field in related_query.fields],
AttributeError: 'NoneType' object has no attribute 'as_'
|
AttributeError
|
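
get_update_sql builds one UPDATE statement per distinct update_fields combination and caches it, keyed by the joined field names (the empty key covers the "all fields" case). A sketch of that caching scheme over a hand-rolled SQL builder; the table and column names are made up and the $n placeholder style is an assumption:

update_cache = {}

def get_update_sql(table, all_fields, pk, update_fields=None):
    key = ",".join(update_fields) if update_fields else ""
    if key in update_cache:
        return update_cache[key]  # built at most once per field combination
    cols = update_fields or all_fields
    sets = ", ".join('"%s"=$%d' % (c, i + 1) for i, c in enumerate(cols))
    sql = 'UPDATE "%s" SET %s WHERE "%s"=$%d' % (table, sets, pk, len(cols) + 1)
    update_cache[key] = sql
    return sql

print(get_update_sql("event", ["name", "tournament_id"], "id"))
print(get_update_sql("event", ["name", "tournament_id"], "id", ["name"]))

|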
async def _prefetch_m2m_relation(
self, instance_list: list, field: str, related_query
) -> list:
instance_id_set: set = {
self._field_to_db(instance._meta.pk, instance.pk, instance)
for instance in instance_list
}
field_object: fields.ManyToManyFieldInstance = self.model._meta.fields_map[ # type: ignore
field
]
through_table = Table(field_object.through)
subquery = (
self.db.query_class.from_(through_table)
.select(
through_table[field_object.backward_key].as_("_backward_relation_key"),
through_table[field_object.forward_key].as_("_forward_relation_key"),
)
.where(through_table[field_object.backward_key].isin(instance_id_set))
)
related_query_table = Table(related_query.model._meta.table)
related_pk_field = related_query.model._meta.db_pk_field
query = (
related_query.query.join(subquery)
.on(subquery._forward_relation_key == related_query_table[related_pk_field])
.select(
subquery._backward_relation_key.as_("_backward_relation_key"),
*[related_query_table[field].as_(field) for field in related_query.fields],
)
)
if related_query._q_objects:
joined_tables: List[Table] = []
modifier = QueryModifier()
for node in related_query._q_objects:
modifier &= node.resolve(
model=related_query.model,
annotations=related_query._annotations,
custom_filters=related_query._custom_filters,
)
where_criterion, joins, having_criterion = modifier.get_query_modifiers()
for join in joins:
if join[0] not in joined_tables:
query = query.join(join[0], how=JoinType.left_outer).on(join[1])
joined_tables.append(join[0])
if where_criterion:
query = query.where(where_criterion)
if having_criterion:
query = query.having(having_criterion)
raw_results = await self.db.execute_query(query.get_sql())
relations = {
(
self.model._meta.pk.to_python_value(e["_backward_relation_key"]),
field_object.field_type._meta.pk.to_python_value(e[related_pk_field]),
)
for e in raw_results
}
related_object_list = [related_query.model._init_from_db(**e) for e in raw_results]
await self.__class__(
model=related_query.model, db=self.db, prefetch_map=related_query._prefetch_map
).fetch_for_list(related_object_list)
related_object_map = {e.pk: e for e in related_object_list}
relation_map: Dict[str, list] = {}
for object_id, related_object_id in relations:
if object_id not in relation_map:
relation_map[object_id] = []
relation_map[object_id].append(related_object_map[related_object_id])
for instance in instance_list:
relation_container = getattr(instance, field)
relation_container._set_result_for_query(relation_map.get(instance.pk, []))
return instance_list
|
async def _prefetch_m2m_relation(
self, instance_list: list, field: str, related_query
) -> list:
instance_id_set: set = {
self._field_to_db(instance._meta.pk, instance.pk, instance)
for instance in instance_list
}
field_object: fields.ManyToManyFieldInstance = self.model._meta.fields_map[ # type: ignore
field
]
through_table = Table(field_object.through)
subquery = (
self.db.query_class.from_(through_table)
.select(
getattr(through_table, field_object.backward_key).as_(
"_backward_relation_key"
),
getattr(through_table, field_object.forward_key).as_(
"_forward_relation_key"
),
)
.where(getattr(through_table, field_object.backward_key).isin(instance_id_set))
)
related_query_table = Table(related_query.model._meta.table)
related_pk_field = related_query.model._meta.db_pk_field
query = (
related_query.query.join(subquery)
.on(
subquery._forward_relation_key
== getattr(related_query_table, related_pk_field)
)
.select(
subquery._backward_relation_key.as_("_backward_relation_key"),
*[
getattr(related_query_table, field).as_(field)
for field in related_query.fields
],
)
)
if related_query._q_objects:
joined_tables: List[Table] = []
modifier = QueryModifier()
for node in related_query._q_objects:
modifier &= node.resolve(
model=related_query.model,
annotations=related_query._annotations,
custom_filters=related_query._custom_filters,
)
where_criterion, joins, having_criterion = modifier.get_query_modifiers()
for join in joins:
if join[0] not in joined_tables:
query = query.join(join[0], how=JoinType.left_outer).on(join[1])
joined_tables.append(join[0])
if where_criterion:
query = query.where(where_criterion)
if having_criterion:
query = query.having(having_criterion)
raw_results = await self.db.execute_query(query.get_sql())
relations = {
(
self.model._meta.pk.to_python_value(e["_backward_relation_key"]),
field_object.field_type._meta.pk.to_python_value(e[related_pk_field]),
)
for e in raw_results
}
related_object_list = [related_query.model._init_from_db(**e) for e in raw_results]
await self.__class__(
model=related_query.model, db=self.db, prefetch_map=related_query._prefetch_map
).fetch_for_list(related_object_list)
related_object_map = {e.pk: e for e in related_object_list}
relation_map: Dict[str, list] = {}
for object_id, related_object_id in relations:
if object_id not in relation_map:
relation_map[object_id] = []
relation_map[object_id].append(related_object_map[related_object_id])
for instance in instance_list:
relation_container = getattr(instance, field)
relation_container._set_result_for_query(relation_map.get(instance.pk, []))
return instance_list
|
https://github.com/tortoise/tortoise-orm/issues/233
|
Traceback (most recent call last):
File "/opt/project/src/server.py", line 54, in <module>
asyncio.run(main())
File "/usr/local/lib/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 579, in run_until_complete
return future.result()
File "/opt/project/src/server.py", line 47, in main
async for item in ModelOne.all().prefetch_related('many_to_many'):
File "/usr/local/lib/python3.7/site-packages/tortoise/queryset.py", line 512, in __aiter__
for val in await self:
File "/usr/local/lib/python3.7/site-packages/tortoise/queryset.py", line 521, in _execute
).execute_select(self.query, custom_fields=list(self._annotations.keys()))
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 90, in execute_select
await self._execute_prefetch_queries(instance_list)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 327, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 320, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 232, in _prefetch_m2m_relation
*[getattr(related_query_table, field).as_(field) for field in related_query.fields],
File "/usr/local/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 232, in <listcomp>
*[getattr(related_query_table, field).as_(field) for field in related_query.fields],
AttributeError: 'NoneType' object has no attribute 'as_'
|
AttributeError
|
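
Once the join has run, the (owner pk, related pk) pairs are grouped into a list-valued dict so each instance receives exactly its related objects. The grouping loop, isolated into a runnable snippet; dict.setdefault is an equivalent, terser spelling of the if-not-in-map dance used above:

relations = {(1, 10), (1, 11), (2, 10)}          # (object_id, related_object_id)
related_object_map = {10: "obj10", 11: "obj11"}  # related pk -> hydrated object

relation_map = {}
for object_id, related_object_id in relations:
    relation_map.setdefault(object_id, []).append(
        related_object_map[related_object_id]
    )

print(sorted(relation_map[1]))  # ['obj10', 'obj11']
print(relation_map.get(3, []))  # [] for instances with no relations

|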
async def _prefetch_reverse_relation(
self, instance_list: list, field: str, related_query
) -> list:
instance_id_set = {
self._field_to_db(instance._meta.pk, instance.pk, instance)
for instance in instance_list
} # type: Set[Any]
backward_relation_manager = getattr(self.model, field)
relation_field = backward_relation_manager.relation_field
related_object_list = await related_query.filter(
**{"{}__in".format(relation_field): list(instance_id_set)}
)
related_object_map = {} # type: Dict[str, list]
for entry in related_object_list:
object_id = getattr(entry, relation_field)
if object_id in related_object_map.keys():
related_object_map[object_id].append(entry)
else:
related_object_map[object_id] = [entry]
for instance in instance_list:
relation_container = getattr(instance, field)
relation_container._set_result_for_query(
related_object_map.get(instance.pk, [])
)
return instance_list
|
async def _prefetch_reverse_relation(
self, instance_list: list, field: str, related_query
) -> list:
instance_id_set = {instance.pk for instance in instance_list} # type: Set[Any]
backward_relation_manager = getattr(self.model, field)
relation_field = backward_relation_manager.relation_field
related_object_list = await related_query.filter(
**{"{}__in".format(relation_field): list(instance_id_set)}
)
related_object_map = {} # type: Dict[str, list]
for entry in related_object_list:
object_id = getattr(entry, relation_field)
if object_id in related_object_map.keys():
related_object_map[object_id].append(entry)
else:
related_object_map[object_id] = [entry]
for instance in instance_list:
relation_container = getattr(instance, field)
relation_container._set_result_for_query(
related_object_map.get(instance.pk, [])
)
return instance_list
|
https://github.com/tortoise/tortoise-orm/issues/151
|
DEBUG:asyncio:Using selector: EpollSelector
INFO:tortoise:Tortoise-ORM startup
connections: {'default': {'engine': 'tortoise.backends.asyncpg', 'credentials': {'port': 5432, 'database': 'postgres', 'host': '172.20.0.2', 'user': 'postgres', 'password': 'postgres'}}}
apps: {'models': {'models': ['__main__'], 'default_connection': 'default'}}
DEBUG:db_client:Created connection <asyncpg.connection.Connection object at 0x7fd621f828b8> with params: {'host': '172.20.0.2', 'port': 5432, 'user': 'postgres', 'database': 'postgres'}
DEBUG:db_client:CREATE TABLE IF NOT EXISTS "modela" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb_modela" ("modelb_id" UUID NOT NULL REFERENCES "modelb" (id) ON DELETE CASCADE,"modela_id" UUID NOT NULL REFERENCES "modela" (id) ON DELETE CASCADE);
DEBUG:db_client:INSERT INTO "modela" ("id") VALUES ($1): ['075c03a6-b777-4044-876e-d5877478318d']
DEBUG:db_client:INSERT INTO "modelb" ("id") VALUES ($1): ['8a5ad9d6-fc30-4aba-a41b-377595cddb5f']
DEBUG:db_client:SELECT "modelb_id","modela_id" FROM "modelb_modela" WHERE "modelb_id"='8a5ad9d6-fc30-4aba-a41b-377595cddb5f' AND "modela_id"='075c03a6-b777-4044-876e-d5877478318d'
DEBUG:db_client:INSERT INTO "modelb_modela" ("modela_id","modelb_id") VALUES ('075c03a6-b777-4044-876e-d5877478318d','8a5ad9d6-fc30-4aba-a41b-377595cddb5f')
INFO:root:asyncpg result [<Record _backward_relation_key=UUID('075c03a6-b777-4044-876e-d5877478318d') id=UUID('8a5ad9d6-fc30-4aba-a41b-377595cddb5f')>]
DEBUG:db_client:SELECT "id" FROM "modela" WHERE "id"='075c03a6-b777-4044-876e-d5877478318d' LIMIT 2
DEBUG:db_client:SELECT "sq0"."_backward_relation_key" "_backward_relation_key","modelb"."id" "id" FROM "modelb" JOIN (SELECT "modela_id" "_backward_relation_key","modelb_id" "_forward_relation_key" FROM "modelb_modela" WHERE "modela_id" IN (075c03a6-b777-4044-876e-d5877478318d)) "sq0" ON "sq0"."_forward_relation_key"="modelb"."id"
Traceback (most recent call last):
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 63, in translate_exceptions_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 30, in retry_connection_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 174, in execute_query
return await connection.fetch(query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 421, in fetch
return await self._execute(query, args, 0, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1414, in _execute
query, args, limit, timeout, return_status=return_status)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1422, in __execute
return await self._do_execute(query, executor, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1434, in _do_execute
stmt = await self._get_statement(query, None)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.PostgresSyntaxError: syntax error at or near "c03a6"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 50, in <module>
asyncio.run(main())
File "/usr/lib64/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib64/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "main.py", line 47, in main
await a.fetch_related("b_models")
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/models.py", line 422, in fetch_related
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 284, in fetch_for_list
await self._execute_prefetch_queries(instance_list)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 267, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 259, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 201, in _prefetch_m2m_relation
raw_results = await self.db.execute_query(query.get_sql())
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 65, in translate_exceptions_
raise OperationalError(exc)
tortoise.exceptions.OperationalError: syntax error at or near "c03a6"
|
asyncpg.exceptions.PostgresSyntaxError
|
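
The PostgresSyntaxError above comes from interpolating raw uuid.UUID values into the subquery's IN (...) list, where they render unquoted (075c03a6-b777-... is not valid SQL). The fix runs every pk through the field's to-DB conversion first, which for a UUID column yields a string the query builder can quote. A minimal sketch; field_to_db is a hypothetical stand-in for self._field_to_db:

import uuid

def field_to_db(value):
    # str() for UUIDs so they end up as quotable string literals.
    return str(value) if isinstance(value, uuid.UUID) else value

pks = [uuid.uuid4(), uuid.uuid4()]
instance_id_set = {field_to_db(pk) for pk in pks}
in_clause = "IN (%s)" % ",".join("'%s'" % v for v in sorted(instance_id_set))
print(in_clause)  # IN ('...','...')  -- quoted, parseable SQL

|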
async def _prefetch_m2m_relation(
self, instance_list: list, field: str, related_query
) -> list:
instance_id_set = {
self._field_to_db(instance._meta.pk, instance.pk, instance)
for instance in instance_list
} # type: Set[Any]
field_object = self.model._meta.fields_map[field]
through_table = Table(field_object.through)
subquery = (
self.db.query_class.from_(through_table)
.select(
getattr(through_table, field_object.backward_key).as_(
"_backward_relation_key"
),
getattr(through_table, field_object.forward_key).as_(
"_forward_relation_key"
),
)
.where(getattr(through_table, field_object.backward_key).isin(instance_id_set))
)
related_query_table = Table(related_query.model._meta.table)
related_pk_field = related_query.model._meta.db_pk_field
query = (
related_query.query.join(subquery)
.on(
subquery._forward_relation_key
== getattr(related_query_table, related_pk_field)
)
.select(
subquery._backward_relation_key.as_("_backward_relation_key"),
*[
getattr(related_query_table, field).as_(field)
for field in related_query.fields
],
)
)
if related_query._q_objects:
joined_tables = [] # type: List[Table]
modifier = QueryModifier()
for node in related_query._q_objects:
modifier &= node.resolve(
model=related_query.model,
annotations=related_query._annotations,
custom_filters=related_query._custom_filters,
)
where_criterion, joins, having_criterion = modifier.get_query_modifiers()
for join in joins:
if join[0] not in joined_tables:
query = query.join(join[0], how=JoinType.left_outer).on(join[1])
joined_tables.append(join[0])
if where_criterion:
query = query.where(where_criterion)
if having_criterion:
query = query.having(having_criterion)
raw_results = await self.db.execute_query(query.get_sql())
relations = {
(
self.model._meta.pk.to_python_value(e["_backward_relation_key"]),
field_object.type._meta.pk.to_python_value(e[related_pk_field]),
)
for e in raw_results
}
related_object_list = [related_query.model._init_from_db(**e) for e in raw_results]
await self.__class__(
model=related_query.model, db=self.db, prefetch_map=related_query._prefetch_map
).fetch_for_list(related_object_list)
related_object_map = {e.pk: e for e in related_object_list}
relation_map = {} # type: Dict[str, list]
for object_id, related_object_id in relations:
if object_id not in relation_map:
relation_map[object_id] = []
relation_map[object_id].append(related_object_map[related_object_id])
for instance in instance_list:
relation_container = getattr(instance, field)
relation_container._set_result_for_query(relation_map.get(instance.pk, []))
return instance_list
|
async def _prefetch_m2m_relation(
self, instance_list: list, field: str, related_query
) -> list:
instance_id_set = {instance.pk for instance in instance_list} # type: Set[Any]
field_object = self.model._meta.fields_map[field]
through_table = Table(field_object.through)
subquery = (
self.db.query_class.from_(through_table)
.select(
getattr(through_table, field_object.backward_key).as_(
"_backward_relation_key"
),
getattr(through_table, field_object.forward_key).as_(
"_forward_relation_key"
),
)
.where(getattr(through_table, field_object.backward_key).isin(instance_id_set))
)
related_query_table = Table(related_query.model._meta.table)
related_pk_field = related_query.model._meta.db_pk_field
query = (
related_query.query.join(subquery)
.on(
subquery._forward_relation_key
== getattr(related_query_table, related_pk_field)
)
.select(
subquery._backward_relation_key.as_("_backward_relation_key"),
*[
getattr(related_query_table, field).as_(field)
for field in related_query.fields
],
)
)
if related_query._q_objects:
joined_tables = [] # type: List[Table]
modifier = QueryModifier()
for node in related_query._q_objects:
modifier &= node.resolve(
model=related_query.model,
annotations=related_query._annotations,
custom_filters=related_query._custom_filters,
)
where_criterion, joins, having_criterion = modifier.get_query_modifiers()
for join in joins:
if join[0] not in joined_tables:
query = query.join(join[0], how=JoinType.left_outer).on(join[1])
joined_tables.append(join[0])
if where_criterion:
query = query.where(where_criterion)
if having_criterion:
query = query.having(having_criterion)
raw_results = await self.db.execute_query(query.get_sql())
relations = {
(
self.model._meta.pk.to_python_value(e["_backward_relation_key"]),
field_object.type._meta.pk.to_python_value(e[related_pk_field]),
)
for e in raw_results
}
related_object_list = [related_query.model._init_from_db(**e) for e in raw_results]
await self.__class__(
model=related_query.model, db=self.db, prefetch_map=related_query._prefetch_map
).fetch_for_list(related_object_list)
related_object_map = {e.pk: e for e in related_object_list}
relation_map = {} # type: Dict[str, list]
for object_id, related_object_id in relations:
if object_id not in relation_map:
relation_map[object_id] = []
relation_map[object_id].append(related_object_map[related_object_id])
for instance in instance_list:
relation_container = getattr(instance, field)
relation_container._set_result_for_query(relation_map.get(instance.pk, []))
return instance_list
|
https://github.com/tortoise/tortoise-orm/issues/151
|
DEBUG:asyncio:Using selector: EpollSelector
INFO:tortoise:Tortoise-ORM startup
connections: {'default': {'engine': 'tortoise.backends.asyncpg', 'credentials': {'port': 5432, 'database': 'postgres', 'host': '172.20.0.2', 'user': 'postgres', 'password': 'postgres'}}}
apps: {'models': {'models': ['__main__'], 'default_connection': 'default'}}
DEBUG:db_client:Created connection <asyncpg.connection.Connection object at 0x7fd621f828b8> with params: {'host': '172.20.0.2', 'port': 5432, 'user': 'postgres', 'database': 'postgres'}
DEBUG:db_client:CREATE TABLE IF NOT EXISTS "modela" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb_modela" ("modelb_id" UUID NOT NULL REFERENCES "modelb" (id) ON DELETE CASCADE,"modela_id" UUID NOT NULL REFERENCES "modela" (id) ON DELETE CASCADE);
DEBUG:db_client:INSERT INTO "modela" ("id") VALUES ($1): ['075c03a6-b777-4044-876e-d5877478318d']
DEBUG:db_client:INSERT INTO "modelb" ("id") VALUES ($1): ['8a5ad9d6-fc30-4aba-a41b-377595cddb5f']
DEBUG:db_client:SELECT "modelb_id","modela_id" FROM "modelb_modela" WHERE "modelb_id"='8a5ad9d6-fc30-4aba-a41b-377595cddb5f' AND "modela_id"='075c03a6-b777-4044-876e-d5877478318d'
DEBUG:db_client:INSERT INTO "modelb_modela" ("modela_id","modelb_id") VALUES ('075c03a6-b777-4044-876e-d5877478318d','8a5ad9d6-fc30-4aba-a41b-377595cddb5f')
INFO:root:asyncpg result [<Record _backward_relation_key=UUID('075c03a6-b777-4044-876e-d5877478318d') id=UUID('8a5ad9d6-fc30-4aba-a41b-377595cddb5f')>]
DEBUG:db_client:SELECT "id" FROM "modela" WHERE "id"='075c03a6-b777-4044-876e-d5877478318d' LIMIT 2
DEBUG:db_client:SELECT "sq0"."_backward_relation_key" "_backward_relation_key","modelb"."id" "id" FROM "modelb" JOIN (SELECT "modela_id" "_backward_relation_key","modelb_id" "_forward_relation_key" FROM "modelb_modela" WHERE "modela_id" IN (075c03a6-b777-4044-876e-d5877478318d)) "sq0" ON "sq0"."_forward_relation_key"="modelb"."id"
Traceback (most recent call last):
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 63, in translate_exceptions_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 30, in retry_connection_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 174, in execute_query
return await connection.fetch(query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 421, in fetch
return await self._execute(query, args, 0, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1414, in _execute
query, args, limit, timeout, return_status=return_status)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1422, in __execute
return await self._do_execute(query, executor, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1434, in _do_execute
stmt = await self._get_statement(query, None)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.PostgresSyntaxError: syntax error at or near "c03a6"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 50, in <module>
asyncio.run(main())
File "/usr/lib64/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib64/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "main.py", line 47, in main
await a.fetch_related("b_models")
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/models.py", line 422, in fetch_related
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 284, in fetch_for_list
await self._execute_prefetch_queries(instance_list)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 267, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 259, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 201, in _prefetch_m2m_relation
raw_results = await self.db.execute_query(query.get_sql())
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 65, in translate_exceptions_
raise OperationalError(exc)
tortoise.exceptions.OperationalError: syntax error at or near "c03a6"
|
asyncpg.exceptions.PostgresSyntaxError
|
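The failing SQL in the log above interpolates the UUID primary key into the IN clause unquoted (IN (075c03a6-...)), so Postgres stops parsing at "c03a6". The fix routes the keys through the pk field's to_db_value/to_python_value hooks so they are rendered in a database-safe form. A minimal sketch of the quoting problem, using a hypothetical format_pk_for_sql helper rather than tortoise-orm's actual code:

import uuid

def format_pk_for_sql(value) -> str:
    # A bare UUID pasted into SQL is parsed as a number followed by garbage;
    # rendering it as a quoted literal is what the field-level hook achieves.
    if isinstance(value, uuid.UUID):
        return "'{}'".format(value)
    return repr(value)

pks = [uuid.UUID("075c03a6-b777-4044-876e-d5877478318d")]
clause = "WHERE modela_id IN ({})".format(",".join(format_pk_for_sql(p) for p in pks))
print(clause)  # WHERE modela_id IN ('075c03a6-b777-4044-876e-d5877478318d')

In production code, parameterized queries are the safer way to achieve the same effect; the sketch only shows why the unquoted value breaks the parser.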
def _query(self):
if not self.instance._saved_in_db:
raise OperationalError(
"This objects hasn't been instanced, call .save() before calling related queries"
)
return self.model.filter(**{self.relation_field: self.instance.pk})
|
def _query(self):
if not self.instance.pk:
raise OperationalError(
"This objects hasn't been instanced, call .save() before"
" calling related queries"
)
return self.model.filter(**{self.relation_field: self.instance.pk})
|
https://github.com/tortoise/tortoise-orm/issues/151
|
DEBUG:asyncio:Using selector: EpollSelector
INFO:tortoise:Tortoise-ORM startup
connections: {'default': {'engine': 'tortoise.backends.asyncpg', 'credentials': {'port': 5432, 'database': 'postgres', 'host': '172.20.0.2', 'user': 'postgres', 'password': 'postgres'}}}
apps: {'models': {'models': ['__main__'], 'default_connection': 'default'}}
DEBUG:db_client:Created connection <asyncpg.connection.Connection object at 0x7fd621f828b8> with params: {'host': '172.20.0.2', 'port': 5432, 'user': 'postgres', 'database': 'postgres'}
DEBUG:db_client:CREATE TABLE IF NOT EXISTS "modela" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb_modela" ("modelb_id" UUID NOT NULL REFERENCES "modelb" (id) ON DELETE CASCADE,"modela_id" UUID NOT NULL REFERENCES "modela" (id) ON DELETE CASCADE);
DEBUG:db_client:INSERT INTO "modela" ("id") VALUES ($1): ['075c03a6-b777-4044-876e-d5877478318d']
DEBUG:db_client:INSERT INTO "modelb" ("id") VALUES ($1): ['8a5ad9d6-fc30-4aba-a41b-377595cddb5f']
DEBUG:db_client:SELECT "modelb_id","modela_id" FROM "modelb_modela" WHERE "modelb_id"='8a5ad9d6-fc30-4aba-a41b-377595cddb5f' AND "modela_id"='075c03a6-b777-4044-876e-d5877478318d'
DEBUG:db_client:INSERT INTO "modelb_modela" ("modela_id","modelb_id") VALUES ('075c03a6-b777-4044-876e-d5877478318d','8a5ad9d6-fc30-4aba-a41b-377595cddb5f')
INFO:root:asyncpg result [<Record _backward_relation_key=UUID('075c03a6-b777-4044-876e-d5877478318d') id=UUID('8a5ad9d6-fc30-4aba-a41b-377595cddb5f')>]
DEBUG:db_client:SELECT "id" FROM "modela" WHERE "id"='075c03a6-b777-4044-876e-d5877478318d' LIMIT 2
DEBUG:db_client:SELECT "sq0"."_backward_relation_key" "_backward_relation_key","modelb"."id" "id" FROM "modelb" JOIN (SELECT "modela_id" "_backward_relation_key","modelb_id" "_forward_relation_key" FROM "modelb_modela" WHERE "modela_id" IN (075c03a6-b777-4044-876e-d5877478318d)) "sq0" ON "sq0"."_forward_relation_key"="modelb"."id"
Traceback (most recent call last):
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 63, in translate_exceptions_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 30, in retry_connection_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 174, in execute_query
return await connection.fetch(query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 421, in fetch
return await self._execute(query, args, 0, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1414, in _execute
query, args, limit, timeout, return_status=return_status)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1422, in __execute
return await self._do_execute(query, executor, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1434, in _do_execute
stmt = await self._get_statement(query, None)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.PostgresSyntaxError: syntax error at or near "c03a6"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 50, in <module>
asyncio.run(main())
File "/usr/lib64/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib64/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "main.py", line 47, in main
await a.fetch_related("b_models")
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/models.py", line 422, in fetch_related
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 284, in fetch_for_list
await self._execute_prefetch_queries(instance_list)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 267, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 259, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 201, in _prefetch_m2m_relation
raw_results = await self.db.execute_query(query.get_sql())
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 65, in translate_exceptions_
raise OperationalError(exc)
tortoise.exceptions.OperationalError: syntax error at or near "c03a6"
|
asyncpg.exceptions.PostgresSyntaxError
|
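The _query change above replaces the pk-truthiness test with an explicit _saved_in_db flag: with a client-generated UUID primary key, instance.pk is already populated before the first save(), so "if not self.instance.pk" can no longer detect an unsaved instance. A toy illustration (illustrative class, not the ORM's internals):

import uuid

class Instance:
    def __init__(self):
        # The pk is assigned client-side, before any INSERT runs.
        self.pk = uuid.uuid4()
        self._saved_in_db = False

    def save(self):
        self._saved_in_db = True

obj = Instance()
print(bool(obj.pk))      # True  -> the old truthiness check wrongly passes
print(obj._saved_in_db)  # False -> the flag still catches the unsaved state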
async def add(self, *instances, using_db=None) -> None:
"""
Adds one or more of ``instances`` to the relation.
If it is already added, it will be silently ignored.
"""
if not instances:
return
if not self.instance._saved_in_db:
raise OperationalError(
"You should first call .save() on {model}".format(model=self.instance)
)
db = using_db if using_db else self.model._meta.db
pk_formatting_func = type(self.instance)._meta.pk.to_db_value
related_pk_formatting_func = type(instances[0])._meta.pk.to_db_value
through_table = Table(self.field.through)
select_query = (
db.query_class.from_(through_table)
.where(
getattr(through_table, self.field.backward_key)
== pk_formatting_func(self.instance.pk, self.instance)
)
.select(self.field.backward_key, self.field.forward_key)
)
query = db.query_class.into(through_table).columns(
getattr(through_table, self.field.forward_key),
getattr(through_table, self.field.backward_key),
)
if len(instances) == 1:
criterion = getattr(
through_table, self.field.forward_key
) == related_pk_formatting_func(instances[0].pk, instances[0])
else:
criterion = getattr(through_table, self.field.forward_key).isin(
[related_pk_formatting_func(i.pk, i) for i in instances]
)
select_query = select_query.where(criterion)
# TODO: This is highly inefficient. Should use UNIQUE index by default.
# And optionally allow duplicates.
already_existing_relations_raw = await db.execute_query(str(select_query))
already_existing_relations = {
(
pk_formatting_func(r[self.field.backward_key], self.instance),
related_pk_formatting_func(r[self.field.forward_key], self.instance),
)
for r in already_existing_relations_raw
}
insert_is_required = False
for instance_to_add in instances:
if not instance_to_add._saved_in_db:
raise OperationalError(
"You should first call .save() on {model}".format(model=instance_to_add)
)
pk_f = related_pk_formatting_func(instance_to_add.pk, instance_to_add)
pk_b = pk_formatting_func(self.instance.pk, self.instance)
if (pk_b, pk_f) in already_existing_relations:
continue
query = query.insert(pk_f, pk_b)
insert_is_required = True
if insert_is_required:
await db.execute_query(str(query))
|
async def add(self, *instances, using_db=None) -> None:
"""
Adds one or more of ``instances`` to the relation.
If it is already added, it will be silently ignored.
"""
if not instances:
return
if self.instance.pk is None:
raise OperationalError(
"You should first call .save() on {model}".format(model=self.instance)
)
db = using_db if using_db else self.model._meta.db
pk_formatting_func = type(self.instance)._meta.pk.to_db_value
related_pk_formatting_func = type(instances[0])._meta.pk.to_db_value
through_table = Table(self.field.through)
select_query = (
db.query_class.from_(through_table)
.where(
getattr(through_table, self.field.backward_key)
== pk_formatting_func(self.instance.pk, self.instance)
)
.select(self.field.backward_key, self.field.forward_key)
)
query = db.query_class.into(through_table).columns(
getattr(through_table, self.field.forward_key),
getattr(through_table, self.field.backward_key),
)
if len(instances) == 1:
criterion = getattr(
through_table, self.field.forward_key
) == related_pk_formatting_func(instances[0].pk, instances[0])
else:
criterion = getattr(through_table, self.field.forward_key).isin(
[related_pk_formatting_func(i.pk, i) for i in instances]
)
select_query = select_query.where(criterion)
already_existing_relations_raw = await db.execute_query(str(select_query))
already_existing_relations = {
(r[self.field.backward_key], r[self.field.forward_key])
for r in already_existing_relations_raw
}
insert_is_required = False
for instance_to_add in instances:
if instance_to_add.pk is None:
raise OperationalError(
"You should first call .save() on {model}".format(model=instance_to_add)
)
if (self.instance.pk, instance_to_add.pk) in already_existing_relations:
continue
query = query.insert(
related_pk_formatting_func(instance_to_add.pk, instance_to_add),
pk_formatting_func(self.instance.pk, self.instance),
)
insert_is_required = True
if insert_is_required:
await db.execute_query(str(query))
|
https://github.com/tortoise/tortoise-orm/issues/151
|
DEBUG:asyncio:Using selector: EpollSelector
INFO:tortoise:Tortoise-ORM startup
connections: {'default': {'engine': 'tortoise.backends.asyncpg', 'credentials': {'port': 5432, 'database': 'postgres', 'host': '172.20.0.2', 'user': 'postgres', 'password': 'postgres'}}}
apps: {'models': {'models': ['__main__'], 'default_connection': 'default'}}
DEBUG:db_client:Created connection <asyncpg.connection.Connection object at 0x7fd621f828b8> with params: {'host': '172.20.0.2', 'port': 5432, 'user': 'postgres', 'database': 'postgres'}
DEBUG:db_client:CREATE TABLE IF NOT EXISTS "modela" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb_modela" ("modelb_id" UUID NOT NULL REFERENCES "modelb" (id) ON DELETE CASCADE,"modela_id" UUID NOT NULL REFERENCES "modela" (id) ON DELETE CASCADE);
DEBUG:db_client:INSERT INTO "modela" ("id") VALUES ($1): ['075c03a6-b777-4044-876e-d5877478318d']
DEBUG:db_client:INSERT INTO "modelb" ("id") VALUES ($1): ['8a5ad9d6-fc30-4aba-a41b-377595cddb5f']
DEBUG:db_client:SELECT "modelb_id","modela_id" FROM "modelb_modela" WHERE "modelb_id"='8a5ad9d6-fc30-4aba-a41b-377595cddb5f' AND "modela_id"='075c03a6-b777-4044-876e-d5877478318d'
DEBUG:db_client:INSERT INTO "modelb_modela" ("modela_id","modelb_id") VALUES ('075c03a6-b777-4044-876e-d5877478318d','8a5ad9d6-fc30-4aba-a41b-377595cddb5f')
INFO:root:asyncpg result [<Record _backward_relation_key=UUID('075c03a6-b777-4044-876e-d5877478318d') id=UUID('8a5ad9d6-fc30-4aba-a41b-377595cddb5f')>]
DEBUG:db_client:SELECT "id" FROM "modela" WHERE "id"='075c03a6-b777-4044-876e-d5877478318d' LIMIT 2
DEBUG:db_client:SELECT "sq0"."_backward_relation_key" "_backward_relation_key","modelb"."id" "id" FROM "modelb" JOIN (SELECT "modela_id" "_backward_relation_key","modelb_id" "_forward_relation_key" FROM "modelb_modela" WHERE "modela_id" IN (075c03a6-b777-4044-876e-d5877478318d)) "sq0" ON "sq0"."_forward_relation_key"="modelb"."id"
Traceback (most recent call last):
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 63, in translate_exceptions_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 30, in retry_connection_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 174, in execute_query
return await connection.fetch(query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 421, in fetch
return await self._execute(query, args, 0, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1414, in _execute
query, args, limit, timeout, return_status=return_status)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1422, in __execute
return await self._do_execute(query, executor, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1434, in _do_execute
stmt = await self._get_statement(query, None)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.PostgresSyntaxError: syntax error at or near "c03a6"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 50, in <module>
asyncio.run(main())
File "/usr/lib64/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib64/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "main.py", line 47, in main
await a.fetch_related("b_models")
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/models.py", line 422, in fetch_related
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 284, in fetch_for_list
await self._execute_prefetch_queries(instance_list)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 267, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 259, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 201, in _prefetch_m2m_relation
raw_results = await self.db.execute_query(query.get_sql())
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 65, in translate_exceptions_
raise OperationalError(exc)
tortoise.exceptions.OperationalError: syntax error at or near "c03a6"
|
asyncpg.exceptions.PostgresSyntaxError
|
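In the patched add() above, both the values being inserted and the rows read back by the duplicate-check select are passed through the pk fields' to_db_value, so the (backward, forward) pairs compare consistently regardless of which Python type the driver hands back. A rough sketch of that normalization idea, with a stand-in to_db_value:

import uuid

def to_db_value(value):
    # Stand-in for Field.to_db_value: normalize to one driver-facing form.
    return str(value)

backward, forward = uuid.uuid4(), uuid.uuid4()
# Rows read back from the through table, already normalized:
existing = {(to_db_value(backward), to_db_value(forward))}
# The pair we are about to insert, normalized the same way:
candidate = (to_db_value(backward), to_db_value(forward))
print(candidate in existing)  # True -> the duplicate insert is skipped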
def _set_field_values(self, values_map: Dict[str, Any]) -> Set[str]:
"""
Sets values for fields honoring type transformations and
return list of fields that were set additionally
"""
meta = self._meta
passed_fields = set()
for key, value in values_map.items():
if key in meta.fk_fields:
if not getattr(value, "_saved_in_db", False):
raise OperationalError(
"You should first call .save() on {} before referring to it".format(
value
)
)
field_object = meta.fields_map[key]
relation_field = field_object.source_field # type: str # type: ignore
setattr(self, relation_field, value.pk)
passed_fields.add(relation_field)
elif key in meta.fields:
field_object = meta.fields_map[key]
if value is None and not field_object.null:
raise ValueError(
"{} is non nullable field, but null was passed".format(key)
)
setattr(self, key, field_object.to_python_value(value))
elif key in meta.db_fields:
field_object = meta.fields_map[meta.fields_db_projection_reverse[key]]
if value is None and not field_object.null:
raise ValueError(
"{} is non nullable field, but null was passed".format(key)
)
setattr(self, key, field_object.to_python_value(value))
elif key in meta.backward_fk_fields:
raise ConfigurationError(
"You can't set backward relations through init, change related model instead"
)
elif key in meta.m2m_fields:
raise ConfigurationError(
"You can't set m2m relations through init, use m2m_manager instead"
)
return passed_fields
|
def _set_field_values(self, values_map: Dict[str, Any]) -> Set[str]:
"""
Sets values for fields honoring type transformations and
return list of fields that were set additionally
"""
meta = self._meta
passed_fields = set()
for key, value in values_map.items():
if key in meta.fk_fields:
if hasattr(value, "pk") and not value.pk:
raise OperationalError(
"You should first call .save() on {} before referring to it".format(
value
)
)
relation_field = "{}_id".format(key)
setattr(self, relation_field, value.pk)
passed_fields.add(relation_field)
elif key in meta.fields:
field_object = meta.fields_map[key]
if value is None and not field_object.null:
raise ValueError(
"{} is non nullable field, but null was passed".format(key)
)
setattr(self, key, field_object.to_python_value(value))
elif key in meta.db_fields:
field_object = meta.fields_map[meta.fields_db_projection_reverse[key]]
if value is None and not field_object.null:
raise ValueError(
"{} is non nullable field, but null was passed".format(key)
)
setattr(self, key, field_object.to_python_value(value))
elif key in meta.backward_fk_fields:
raise ConfigurationError(
"You can't set backward relations through init, change related model instead"
)
elif key in meta.m2m_fields:
raise ConfigurationError(
"You can't set m2m relations through init, use m2m_manager instead"
)
return passed_fields
|
https://github.com/tortoise/tortoise-orm/issues/151
|
DEBUG:asyncio:Using selector: EpollSelector
INFO:tortoise:Tortoise-ORM startup
connections: {'default': {'engine': 'tortoise.backends.asyncpg', 'credentials': {'port': 5432, 'database': 'postgres', 'host': '172.20.0.2', 'user': 'postgres', 'password': 'postgres'}}}
apps: {'models': {'models': ['__main__'], 'default_connection': 'default'}}
DEBUG:db_client:Created connection <asyncpg.connection.Connection object at 0x7fd621f828b8> with params: {'host': '172.20.0.2', 'port': 5432, 'user': 'postgres', 'database': 'postgres'}
DEBUG:db_client:CREATE TABLE IF NOT EXISTS "modela" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb_modela" ("modelb_id" UUID NOT NULL REFERENCES "modelb" (id) ON DELETE CASCADE,"modela_id" UUID NOT NULL REFERENCES "modela" (id) ON DELETE CASCADE);
DEBUG:db_client:INSERT INTO "modela" ("id") VALUES ($1): ['075c03a6-b777-4044-876e-d5877478318d']
DEBUG:db_client:INSERT INTO "modelb" ("id") VALUES ($1): ['8a5ad9d6-fc30-4aba-a41b-377595cddb5f']
DEBUG:db_client:SELECT "modelb_id","modela_id" FROM "modelb_modela" WHERE "modelb_id"='8a5ad9d6-fc30-4aba-a41b-377595cddb5f' AND "modela_id"='075c03a6-b777-4044-876e-d5877478318d'
DEBUG:db_client:INSERT INTO "modelb_modela" ("modela_id","modelb_id") VALUES ('075c03a6-b777-4044-876e-d5877478318d','8a5ad9d6-fc30-4aba-a41b-377595cddb5f')
INFO:root:asyncpg result [<Record _backward_relation_key=UUID('075c03a6-b777-4044-876e-d5877478318d') id=UUID('8a5ad9d6-fc30-4aba-a41b-377595cddb5f')>]
DEBUG:db_client:SELECT "id" FROM "modela" WHERE "id"='075c03a6-b777-4044-876e-d5877478318d' LIMIT 2
DEBUG:db_client:SELECT "sq0"."_backward_relation_key" "_backward_relation_key","modelb"."id" "id" FROM "modelb" JOIN (SELECT "modela_id" "_backward_relation_key","modelb_id" "_forward_relation_key" FROM "modelb_modela" WHERE "modela_id" IN (075c03a6-b777-4044-876e-d5877478318d)) "sq0" ON "sq0"."_forward_relation_key"="modelb"."id"
Traceback (most recent call last):
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 63, in translate_exceptions_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 30, in retry_connection_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 174, in execute_query
return await connection.fetch(query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 421, in fetch
return await self._execute(query, args, 0, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1414, in _execute
query, args, limit, timeout, return_status=return_status)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1422, in __execute
return await self._do_execute(query, executor, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1434, in _do_execute
stmt = await self._get_statement(query, None)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.PostgresSyntaxError: syntax error at or near "c03a6"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 50, in <module>
asyncio.run(main())
File "/usr/lib64/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib64/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "main.py", line 47, in main
await a.fetch_related("b_models")
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/models.py", line 422, in fetch_related
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 284, in fetch_for_list
await self._execute_prefetch_queries(instance_list)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 267, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 259, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 201, in _prefetch_m2m_relation
raw_results = await self.db.execute_query(query.get_sql())
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 65, in translate_exceptions_
raise OperationalError(exc)
tortoise.exceptions.OperationalError: syntax error at or near "c03a6"
|
asyncpg.exceptions.PostgresSyntaxError
|
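The older _set_field_values derived the FK column as "{}_id".format(key), which breaks as soon as the field declares a different source_field; the fix reads the column name off the field object and tests persistence via _saved_in_db rather than pk truthiness. A simplified illustration with a hypothetical field class:

class ForeignKeyField:
    def __init__(self, name, source_field=None):
        self.name = name
        # The column may differ from "<name>_id" when source_field is set.
        self.source_field = source_field or name + "_id"

team = ForeignKeyField("team", source_field="team_uuid")
print("{}_id".format(team.name))  # team_id   -> the wrong column
print(team.source_field)          # team_uuid -> what the fix uses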
def _make_query(self):
table = Table(self.model._meta.table)
self.query = self._db.query_class.update(table)
self.resolve_filters(
model=self.model,
q_objects=self.q_objects,
annotations=self.annotations,
custom_filters=self.custom_filters,
)
# Need to get executor to get correct column_map
executor = self._db.executor_class(model=self.model, db=self._db)
for key, value in self.update_kwargs.items():
field_object = self.model._meta.fields_map.get(key)
if not field_object:
raise FieldError(
"Unknown keyword argument {} for model {}".format(key, self.model)
)
if field_object.generated:
raise IntegrityError(
"Field {} is generated and can not be updated".format(key)
)
if key in self.model._meta.db_fields:
db_field = self.model._meta.fields_db_projection[key]
value = executor.column_map[key](value, None)
elif isinstance(field_object, fields.ForeignKeyField):
db_field = field_object.source_field
value = executor.column_map[db_field](value.id, None)
else:
raise FieldError("Field {} is virtual and can not be updated".format(key))
self.query = self.query.set(db_field, value)
|
def _make_query(self):
table = Table(self.model._meta.table)
self.query = self._db.query_class.update(table)
self.resolve_filters(
model=self.model,
q_objects=self.q_objects,
annotations=self.annotations,
custom_filters=self.custom_filters,
)
# Need to get executor to get correct column_map
executor = self._db.executor_class(model=self.model, db=self._db)
for key, value in self.update_kwargs.items():
field_object = self.model._meta.fields_map.get(key)
if not field_object:
raise FieldError(
"Unknown keyword argument {} for model {}".format(key, self.model)
)
if field_object.generated:
raise IntegrityError(
"Field {} is generated and can not be updated".format(key)
)
if key in self.model._meta.db_fields:
db_field = self.model._meta.fields_db_projection[key]
value = executor.column_map[key](value, None)
elif isinstance(field_object, fields.ForeignKeyField):
db_field = "{}_id".format(key)
value = value.id
else:
raise FieldError("Field {} is virtual and can not be updated".format(key))
self.query = self.query.set(db_field, value)
|
https://github.com/tortoise/tortoise-orm/issues/151
|
DEBUG:asyncio:Using selector: EpollSelector
INFO:tortoise:Tortoise-ORM startup
connections: {'default': {'engine': 'tortoise.backends.asyncpg', 'credentials': {'port': 5432, 'database': 'postgres', 'host': '172.20.0.2', 'user': 'postgres', 'password': 'postgres'}}}
apps: {'models': {'models': ['__main__'], 'default_connection': 'default'}}
DEBUG:db_client:Created connection <asyncpg.connection.Connection object at 0x7fd621f828b8> with params: {'host': '172.20.0.2', 'port': 5432, 'user': 'postgres', 'database': 'postgres'}
DEBUG:db_client:CREATE TABLE IF NOT EXISTS "modela" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb" ("id" UUID NOT NULL PRIMARY KEY); CREATE TABLE IF NOT EXISTS "modelb_modela" ("modelb_id" UUID NOT NULL REFERENCES "modelb" (id) ON DELETE CASCADE,"modela_id" UUID NOT NULL REFERENCES "modela" (id) ON DELETE CASCADE);
DEBUG:db_client:INSERT INTO "modela" ("id") VALUES ($1): ['075c03a6-b777-4044-876e-d5877478318d']
DEBUG:db_client:INSERT INTO "modelb" ("id") VALUES ($1): ['8a5ad9d6-fc30-4aba-a41b-377595cddb5f']
DEBUG:db_client:SELECT "modelb_id","modela_id" FROM "modelb_modela" WHERE "modelb_id"='8a5ad9d6-fc30-4aba-a41b-377595cddb5f' AND "modela_id"='075c03a6-b777-4044-876e-d5877478318d'
DEBUG:db_client:INSERT INTO "modelb_modela" ("modela_id","modelb_id") VALUES ('075c03a6-b777-4044-876e-d5877478318d','8a5ad9d6-fc30-4aba-a41b-377595cddb5f')
INFO:root:asyncpg result [<Record _backward_relation_key=UUID('075c03a6-b777-4044-876e-d5877478318d') id=UUID('8a5ad9d6-fc30-4aba-a41b-377595cddb5f')>]
DEBUG:db_client:SELECT "id" FROM "modela" WHERE "id"='075c03a6-b777-4044-876e-d5877478318d' LIMIT 2
DEBUG:db_client:SELECT "sq0"."_backward_relation_key" "_backward_relation_key","modelb"."id" "id" FROM "modelb" JOIN (SELECT "modela_id" "_backward_relation_key","modelb_id" "_forward_relation_key" FROM "modelb_modela" WHERE "modela_id" IN (075c03a6-b777-4044-876e-d5877478318d)) "sq0" ON "sq0"."_forward_relation_key"="modelb"."id"
Traceback (most recent call last):
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 63, in translate_exceptions_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 30, in retry_connection_
return await func(self, *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 174, in execute_query
return await connection.fetch(query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 421, in fetch
return await self._execute(query, args, 0, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1414, in _execute
query, args, limit, timeout, return_status=return_status)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1422, in __execute
return await self._do_execute(query, executor, timeout)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 1434, in _do_execute
stmt = await self._get_statement(query, None)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.PostgresSyntaxError: syntax error at or near "c03a6"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 50, in <module>
asyncio.run(main())
File "/usr/lib64/python3.7/asyncio/runners.py", line 43, in run
return loop.run_until_complete(main)
File "/usr/lib64/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "main.py", line 47, in main
await a.fetch_related("b_models")
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/models.py", line 422, in fetch_related
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 284, in fetch_for_list
await self._execute_prefetch_queries(instance_list)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 267, in _execute_prefetch_queries
await self._do_prefetch(instance_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 259, in _do_prefetch
return await self._prefetch_m2m_relation(instance_id_list, field, related_query)
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/base/executor.py", line 201, in _prefetch_m2m_relation
raw_results = await self.db.execute_query(query.get_sql())
File "/home/nik/.virtualenvs/tortoise-pk-error/lib/python3.7/site-packages/tortoise/backends/asyncpg/client.py", line 65, in translate_exceptions_
raise OperationalError(exc)
tortoise.exceptions.OperationalError: syntax error at or near "c03a6"
|
asyncpg.exceptions.PostgresSyntaxError
|
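For .update(some_fk=obj), the old _make_query wrote value.id into the SET clause raw; the fix pushes the related pk through the executor's column_map so it gets the same to-database conversion as on insert. A rough sketch with a hypothetical column_map entry:

import uuid

# column_map maps a db column to a converter, mirroring the executor's role.
column_map = {"owner_id": lambda value, instance: str(value)}

class Owner:
    id = uuid.uuid4()

db_field = "owner_id"
value = column_map[db_field](Owner.id, None)  # converted, not the raw UUID
print(type(value).__name__, value)            # e.g. str 9f3e...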
def retry_connection(func):
@wraps(func)
async def retry_connection_(self, *args):
try:
return await func(self, *args)
except (
asyncpg.PostgresConnectionError,
asyncpg.ConnectionDoesNotExistError,
asyncpg.ConnectionFailureError,
asyncpg.InterfaceError,
):
# Here we assume that a connection error has happened
# Re-create connection and re-try the function call once only.
if getattr(self, "transaction", None):
self._finalized = True
raise TransactionManagementError(
"Connection gone away during transaction"
)
await self._lock.acquire()
logging.info("Attempting reconnect")
try:
await self._close()
await self.create_connection(with_db=True)
logging.info("Reconnected")
except Exception as e:
logging.info("Failed to reconnect: %s", str(e))
raise
finally:
self._lock.release()
return await func(self, *args)
return retry_connection_
|
def retry_connection(func):
@wraps(func)
async def wrapped(self, *args):
try:
return await func(self, *args)
except (
asyncpg.PostgresConnectionError,
asyncpg.ConnectionDoesNotExistError,
asyncpg.ConnectionFailureError,
):
# Here we assume that a connection error has happened
# Re-create connection and re-try the function call once only.
await self._lock.acquire()
logging.info("Attempting reconnect")
try:
await self._close()
await self.create_connection(with_db=True)
logging.info("Reconnected")
except Exception:
logging.info("Failed to reconnect")
finally:
self._lock.release()
return await func(self, *args)
return wrapped
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
await instance.save(using_db=kwargs.get('using_db'))
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
await self._insert_instance(*args, **kwargs)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
).execute_insert(self)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
instance.id = await self.db.execute_insert(query, values)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
return await func(self, query, *args)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
stmt = await connection.prepare(query)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
return await self._prepare(query, timeout=timeout, use_cache=False)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
use_cache=use_cache)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
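The reworked retry_connection above does three things the old version did not: it also catches asyncpg.InterfaceError, it refuses to retry inside an open transaction (raising TransactionManagementError rather than silently replaying statements on a fresh connection), and it re-raises when the reconnect itself fails. The underlying shape is a retry-once decorator; a generic, self-contained sketch not tied to asyncpg:

import asyncio
import functools

def retry_once(reconnect):
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(self, *args):
            try:
                return await func(self, *args)
            except ConnectionError:
                # One reconnect attempt, then a single retry of the call.
                await reconnect(self)
                return await func(self, *args)
        return wrapper
    return decorator

class Client:
    def __init__(self):
        self.healthy = False

    async def _reconnect(self):
        self.healthy = True

    @retry_once(lambda self: self._reconnect())
    async def query(self):
        if not self.healthy:
            raise ConnectionError("gone away")
        return "ok"

print(asyncio.run(Client().query()))  # ok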
def translate_exceptions(func):
@wraps(func)
async def translate_exceptions_(self, *args):
try:
return await func(self, *args)
except asyncpg.SyntaxOrAccessError as exc:
raise OperationalError(exc)
except asyncpg.IntegrityConstraintViolationError as exc:
raise IntegrityError(exc)
return translate_exceptions_
|
def translate_exceptions(func):
@wraps(func)
async def wrapped(self, *args):
try:
return await func(self, *args)
except asyncpg.SyntaxOrAccessError as exc:
raise OperationalError(exc)
except asyncpg.IntegrityConstraintViolationError as exc:
raise IntegrityError(exc)
return wrapped
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
await instance.save(using_db=kwargs.get('using_db'))
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
await self._insert_instance(*args, **kwargs)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
).execute_insert(self)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
instance.id = await self.db.execute_insert(query, values)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
return await func(self, query, *args)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
stmt = await connection.prepare(query)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
return await self._prepare(query, timeout=timeout, use_cache=False)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
use_cache=use_cache)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def _in_transaction(self) -> "TransactionWrapper":
return self._transaction_class(self)
|
def _in_transaction(self) -> "TransactionWrapper":
return self._transaction_class(self.connection_name, self._connection, self._lock)
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
await instance.save(using_db=kwargs.get('using_db'))
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
await self._insert_instance(*args, **kwargs)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
).execute_insert(self)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
instance.id = await self.db.execute_insert(query, values)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
return await func(self, query, *args)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
stmt = await connection.prepare(query)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
return await self._prepare(query, timeout=timeout, use_cache=False)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
use_cache=use_cache)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def __init__(self, connection) -> None:
self._connection = connection._connection
self._lock = connection._lock
self.log = logging.getLogger("db_client")
self._transaction_class = self.__class__
self._old_context_value = None
self.connection_name = connection.connection_name
self.transaction = None
self._finalized = False
self._parent = connection
|
def __init__(self, connection_name: str, connection, lock) -> None:
self._connection = connection
self._lock = lock
self.log = logging.getLogger("db_client")
self._transaction_class = self.__class__
self._old_context_value = None
self.connection_name = connection_name
self.transaction = None
self._finalized = False
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
await instance.save(using_db=kwargs.get('using_db'))
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
await self._insert_instance(*args, **kwargs)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
).execute_insert(self)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
instance.id = await self.db.execute_insert(query, values)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
return await func(self, query, *args)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
stmt = await connection.prepare(query)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
return await self._prepare(query, timeout=timeout, use_cache=False)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
use_cache=use_cache)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
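The wrapper's __init__ now receives the parent client itself rather than a bare (connection_name, connection, lock) triple; together with the _in_transaction change above, this lets the wrapper delegate reconnects upward and re-read self._parent._connection, so a connection replaced mid-flight becomes visible inside the transaction. A minimal delegation sketch with hypothetical classes:

class Client:
    def __init__(self):
        self._connection = object()

    def reconnect(self):
        self._connection = object()  # a new underlying connection

class TransactionWrapper:
    def __init__(self, parent):
        self._parent = parent
        self._connection = parent._connection

    def refresh(self):
        # Delegate to the parent, then pick up its (possibly new) connection.
        self._connection = self._parent._connection

client = Client()
tx = TransactionWrapper(client)
client.reconnect()
tx.refresh()
print(tx._connection is client._connection)  # True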
async def commit(self):
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
await self.transaction.commit()
self.release()
|
async def commit(self):
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
self._finalized = True
await self.transaction.commit()
current_transaction_map[self.connection_name].set(self._old_context_value)
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
await instance.save(using_db=kwargs.get('using_db'))
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
await self._insert_instance(*args, **kwargs)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
).execute_insert(self)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
instance.id = await self.db.execute_insert(query, values)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
return await func(self, query, *args)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
stmt = await connection.prepare(query)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
return await self._prepare(query, timeout=timeout, use_cache=False)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
use_cache=use_cache)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def rollback(self):
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
await self.transaction.rollback()
self.release()
|
async def rollback(self):
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
self._finalized = True
await self.transaction.rollback()
current_transaction_map[self.connection_name].set(self._old_context_value)
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
await instance.save(using_db=kwargs.get('using_db'))
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
await self._insert_instance(*args, **kwargs)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
).execute_insert(self)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
instance.id = await self.db.execute_insert(query, values)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
return await func(self, query, *args)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
stmt = await connection.prepare(query)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
return await self._prepare(query, timeout=timeout, use_cache=False)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
use_cache=use_cache)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
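In the patched commit() and rollback() above, the inlined cleanup (setting _finalized, restoring the previous transaction context) is apparently consolidated into a single release() helper, so the two finalizers cannot drift apart. A stripped-down version of that pattern:

class Tx:
    def __init__(self):
        self._finalized = False

    def release(self):
        # One place for the shared cleanup (flag, context restoration, ...).
        self._finalized = True

    def commit(self):
        if self._finalized:
            raise RuntimeError("Transaction already finalised")
        self.release()

    def rollback(self):
        if self._finalized:
            raise RuntimeError("Transaction already finalised")
        self.release()

t = Tx()
t.commit()
try:
    t.rollback()
except RuntimeError as exc:
    print(exc)  # Transaction already finalised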
async def create_connection(self, with_db: bool) -> None:
await self._parent.create_connection(with_db)
self._connection = self._parent._connection
|
async def create_connection(self, with_db: bool) -> None:
self._template = {
"host": self.host,
"port": self.port,
"user": self.user,
"database": self.database if with_db else None,
**self.extra,
}
try:
self._connection = await asyncpg.connect(
None, password=self.password, **self._template
)
self.log.debug(
"Created connection %s with params: %s", self._connection, self._template
)
except asyncpg.InvalidCatalogNameError:
raise DBConnectionError(
"Can't establish connection to database {}".format(self.database)
)
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
await instance.save(using_db=kwargs.get('using_db'))
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
await self._insert_instance(*args, **kwargs)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
).execute_insert(self)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
instance.id = await self.db.execute_insert(query, values)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
return await func(self, query, *args)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
stmt = await connection.prepare(query)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
return await self._prepare(query, timeout=timeout, use_cache=False)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
use_cache=use_cache)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def _close(self) -> None:
await self._parent._close()
self._connection = self._parent._connection
|
async def _close(self) -> None:
if self._connection: # pragma: nobranch
await self._connection.close()
self.log.debug(
"Closed connection %s with params: %s", self._connection, self._template
)
self._template.clear()
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
await instance.save(using_db=kwargs.get('using_db'))
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
await self._insert_instance(*args, **kwargs)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
).execute_insert(self)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
instance.id = await self.db.execute_insert(query, values)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
return await func(self, query, *args)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
stmt = await connection.prepare(query)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
return await self._prepare(query, timeout=timeout, use_cache=False)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
use_cache=use_cache)
File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
statement = await self._protocol.prepare(stmt_name, query, timeout)
File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def __init__(
self,
dialect: str,
*,
# Is the connection a Daemon?
daemon: bool = True,
# Deficiencies to work around:
safe_indexes: bool = True,
requires_limit: bool = False,
) -> None:
super().__setattr__("_mutable", True)
self.dialect = dialect
self.daemon = daemon
self.requires_limit = requires_limit
self.safe_indexes = safe_indexes
super().__setattr__("_mutable", False)
|
def __init__(
self,
dialect: str,
*,
# Deficiencies to work around:
safe_indexes: bool = True,
requires_limit: bool = False,
) -> None:
super().__setattr__("_mutable", True)
self.dialect = dialect
self.requires_limit = requires_limit
self.safe_indexes = safe_indexes
super().__setattr__("_mutable", False)
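
A short sketch of the immutability idiom used in this constructor: writes are only allowed while a private `_mutable` flag is set, and the flag itself is toggled via `super().__setattr__` so the guard cannot block itself. The class below is illustrative, not the real capabilities class.

class FrozenCapabilities:
    def __init__(self, dialect: str, *, daemon: bool = True) -> None:
        super().__setattr__("_mutable", True)   # bypass our own guard
        self.dialect = dialect
        self.daemon = daemon
        super().__setattr__("_mutable", False)  # frozen from here on

    def __setattr__(self, name, value):
        if not self._mutable:
            raise AttributeError(f"{type(self).__name__} is immutable")
        super().__setattr__(name, value)

caps = FrozenCapabilities("postgres")
try:
    caps.dialect = "mysql"
except AttributeError as exc:
    print(exc)  # FrozenCapabilities is immutable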
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
if exc_type:
if issubclass(exc_type, TransactionManagementError):
self.release()
else:
await self.rollback()
else:
await self.commit()
|
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
if exc_type:
await self.rollback()
else:
await self.commit()
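
A runnable sketch of the exit-handler logic above, with prints standing in for the real commit/rollback/release calls: a `TransactionManagementError` means the transaction was already finalised inside the block, so the handler only releases the connection rather than rolling back a second time.

import asyncio

class TransactionManagementError(Exception):
    pass

class Tx:
    async def commit(self): print("commit")
    async def rollback(self): print("rollback")
    def release(self): print("release only (already finalised)")

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        if exc_type:
            if issubclass(exc_type, TransactionManagementError):
                self.release()           # already finalised: don't roll back twice
            else:
                await self.rollback()
        else:
            await self.commit()
        # returning None lets any exception propagate to the caller

async def main():
    async with Tx():
        pass                             # clean exit -> commit
    try:
        async with Tx():
            raise ValueError("boom")     # failure -> rollback, then re-raised
    except ValueError:
        pass

asyncio.run(main())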
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def retry_connection(func):
@wraps(func)
async def retry_connection_(self, *args):
try:
return await func(self, *args)
except (
RuntimeError,
pymysql.err.OperationalError,
pymysql.err.InternalError,
pymysql.err.InterfaceError,
):
# Here we assume that a connection error has happened
# Re-create connection and re-try the function call once only.
if getattr(self, "_finalized", None) is False:
raise TransactionManagementError(
"Connection gone away during transaction"
)
await self._lock.acquire()
logging.info("Attempting reconnect")
try:
await self._close()
await self.create_connection(with_db=True)
logging.info("Reconnected")
except Exception as e:
logging.info("Failed to reconnect: %s", str(e))
raise
finally:
self._lock.release()
return await func(self, *args)
return retry_connection_
|
def retry_connection(func):
@wraps(func)
async def wrapped(self, *args):
try:
return await func(self, *args)
except (
RuntimeError,
pymysql.err.OperationalError,
pymysql.err.InternalError,
pymysql.err.InterfaceError,
):
# Here we assume that a connection error has happened
# Re-create connection and re-try the function call once only.
await self._lock.acquire()
logging.info("Attempting reconnect")
try:
self._close()
await self.create_connection(with_db=True)
logging.info("Reconnected")
except Exception:
logging.info("Failed to reconnect")
finally:
self._lock.release()
return await func(self, *args)
return wrapped
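
A reduced, self-contained sketch of the retry-once idea (using `async with` on the lock instead of explicit acquire/release, and a generic `ConnectionError` in place of the pymysql exception classes):

import asyncio
from functools import wraps

def retry_connection(func):
    @wraps(func)
    async def retry_connection_(self, *args):
        try:
            return await func(self, *args)
        except ConnectionError:
            async with self._lock:           # serialise concurrent reconnects
                await self.reconnect()
            return await func(self, *args)   # retry once; a second failure propagates
    return retry_connection_

class Client:
    def __init__(self):
        self._lock = asyncio.Lock()
        self._fail_once = True

    async def reconnect(self):
        print("reconnected")

    @retry_connection
    async def query(self, sql):
        if self._fail_once:
            self._fail_once = False
            raise ConnectionError("gone away")
        return "ok: " + sql

print(asyncio.run(Client().query("SELECT 1")))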
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def translate_exceptions(func):
@wraps(func)
async def translate_exceptions_(self, *args):
try:
return await func(self, *args)
except (
pymysql.err.OperationalError,
pymysql.err.ProgrammingError,
pymysql.err.DataError,
pymysql.err.InternalError,
pymysql.err.NotSupportedError,
) as exc:
raise OperationalError(exc)
except pymysql.err.IntegrityError as exc:
raise IntegrityError(exc)
return translate_exceptions_
|
def translate_exceptions(func):
@wraps(func)
async def wrapped(self, *args):
try:
return await func(self, *args)
except (
pymysql.err.OperationalError,
pymysql.err.ProgrammingError,
pymysql.err.DataError,
pymysql.err.InternalError,
pymysql.err.NotSupportedError,
) as exc:
raise OperationalError(exc)
except pymysql.err.IntegrityError as exc:
raise IntegrityError(exc)
return wrapped
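
The translation decorator boils down to catching driver-level exceptions and re-raising them as the ORM's own types, so callers never depend on a specific driver. A self-contained sketch with stand-in exception classes:

import asyncio
from functools import wraps

class DriverError(Exception):
    pass

class OperationalError(Exception):
    pass

def translate_exceptions(func):
    @wraps(func)
    async def translate_exceptions_(self, *args):
        try:
            return await func(self, *args)
        except DriverError as exc:
            raise OperationalError(exc) from exc
    return translate_exceptions_

class Client:
    @translate_exceptions
    async def execute_query(self, query):
        raise DriverError("You have an error in your SQL syntax")

try:
    asyncio.run(Client().execute_query("SELEC 1"))
except OperationalError as exc:
    print("translated:", exc)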
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def _close(self) -> None:
if self._connection: # pragma: nobranch
self._connection.close()
self.log.debug(
"Closed connection %s with params: %s", self._connection, self._template
)
self._template.clear()
|
def _close(self) -> None:
if self._connection: # pragma: nobranch
self._connection.close()
self.log.debug(
"Closed connection %s with params: %s", self._connection, self._template
)
self._template.clear()
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def close(self) -> None:
await self._close()
self._connection = None
|
async def close(self) -> None:
self._close()
self._connection = None
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def _in_transaction(self):
return self._transaction_class(self)
|
def _in_transaction(self):
return self._transaction_class(self.connection_name, self._connection, self._lock)
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def __init__(self, connection):
self.connection_name = connection.connection_name
self._connection = connection._connection # type: aiomysql.Connection
self._lock = connection._lock
self.log = logging.getLogger("db_client")
self._transaction_class = self.__class__
self._finalized = None # type: Optional[bool]
self._old_context_value = None
self._parent = connection
self.transaction = None
|
def __init__(self, connection_name, connection, lock):
self.connection_name = connection_name
self._connection = connection
self._lock = lock
self.log = logging.getLogger("db_client")
self._transaction_class = self.__class__
self._finalized = False
self._old_context_value = None
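
A sketch of why the fixed wrapper keeps the whole parent client instead of copying its fields: a copied connection reference goes stale after a reconnect, while a reference resolved through the parent always sees the current connection. All names below are illustrative.

class Client:
    def __init__(self):
        self._connection = "conn-1"

class CopyingWrapper:
    def __init__(self, client):
        self._connection = client._connection   # frozen at construction time

class DelegatingWrapper:
    def __init__(self, client):
        self._parent = client

    @property
    def _connection(self):
        return self._parent._connection         # always the current connection

client = Client()
copying, delegating = CopyingWrapper(client), DelegatingWrapper(client)
client._connection = "conn-2"                   # simulated reconnect
print(copying._connection)      # conn-1 (stale)
print(delegating._connection)   # conn-2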
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def start(self):
await self._connection.begin()
self._finalized = False
current_transaction = current_transaction_map[self.connection_name]
self._old_context_value = current_transaction.get()
current_transaction.set(self)
|
async def start(self):
await self._connection.begin()
current_transaction = current_transaction_map[self.connection_name]
self._old_context_value = current_transaction.get()
current_transaction.set(self)
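
A self-contained sketch of the current-transaction bookkeeping that `start()` participates in: a `contextvars.ContextVar` per connection name records the active transaction, and finalisation restores the saved previous value. The structures here are simplified stand-ins for the tortoise-orm internals.

import asyncio
import contextvars

current_transaction_map = {"default": contextvars.ContextVar("default", default=None)}

class Tx:
    def __init__(self, connection_name="default"):
        self.connection_name = connection_name
        self._old_context_value = None
        self._finalized = None   # None = not started, False = open, True = done

    async def start(self):
        self._finalized = False
        current = current_transaction_map[self.connection_name]
        self._old_context_value = current.get()
        current.set(self)

    async def commit(self):
        self._finalized = True
        current_transaction_map[self.connection_name].set(self._old_context_value)

async def main():
    tx = Tx()
    await tx.start()
    assert current_transaction_map["default"].get() is tx
    await tx.commit()
    assert current_transaction_map["default"].get() is None

asyncio.run(main())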
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def commit(self) -> None:
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
await self._connection.commit()
self.release()
|
async def commit(self):
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
self._finalized = True
await self._connection.commit()
current_transaction_map[self.connection_name].set(self._old_context_value)
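
Sketch of the refactor's effect: `commit()` and `rollback()` both end in a single `release()` helper that flips `_finalized` (and, in the real code, restores the current-transaction context variable), so double finalisation is caught in one place. The helper below is an assumed simplification, not the tortoise-orm implementation.

import asyncio

class TransactionManagementError(Exception):
    pass

class Tx:
    def __init__(self):
        self._finalized = False

    def release(self):
        # single place that marks the transaction finished; the real code
        # also restores the current-transaction context variable here
        self._finalized = True

    async def commit(self) -> None:
        if self._finalized:
            raise TransactionManagementError("Transaction already finalised")
        # await self._connection.commit() would happen here
        self.release()

async def main():
    tx = Tx()
    await tx.commit()
    try:
        await tx.commit()
    except TransactionManagementError as exc:
        print(exc)

asyncio.run(main())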
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def rollback(self) -> None:
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
await self._connection.rollback()
self.release()
|
async def rollback(self):
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
self._finalized = True
await self._connection.rollback()
current_transaction_map[self.connection_name].set(self._old_context_value)
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def create_connection(self, with_db: bool) -> None:
await self._parent.create_connection(with_db)
self._connection = self._parent._connection
|
async def create_connection(self, with_db: bool) -> None:
self._template = {
"host": self.host,
"port": self.port,
"user": self.user,
"db": self.database if with_db else None,
"autocommit": True,
**self.extra,
}
try:
self._connection = await aiomysql.connect(
password=self.password, **self._template
)
self.log.debug(
"Created connection %s with params: %s", self._connection, self._template
)
except pymysql.err.OperationalError:
raise DBConnectionError(
"Can't connect to MySQL server: {template}".format(template=self._template)
)
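
A sketch of the connect-template pattern, with `aiomysql.connect` replaced by a fake coroutine so the example runs on its own. Note that the real code passes the password separately from the template, which keeps it out of the debug log line; the same choice is reproduced here.

import asyncio

class DBConnectionError(Exception):
    pass

async def fake_connect(**kwargs):
    # stand-in for aiomysql.connect(); fails for an unreachable host
    if kwargs.get("host") == "unreachable":
        raise OSError("no route to host")
    return "connection to {host}:{port}".format(**kwargs)

async def create_connection(host, port, user, password, database, with_db):
    template = {
        "host": host,
        "port": port,
        "user": user,
        "db": database if with_db else None,
        "autocommit": True,
    }
    try:
        # password is deliberately not part of the (logged) template
        return await fake_connect(password=password, **template)
    except OSError:
        raise DBConnectionError("Can't connect to MySQL server: {}".format(template))

print(asyncio.run(create_connection("localhost", 3306, "root", "", "test", True)))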
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def translate_exceptions(func):
@wraps(func)
async def translate_exceptions_(self, query, *args):
try:
return await func(self, query, *args)
except sqlite3.OperationalError as exc:
raise OperationalError(exc)
except sqlite3.IntegrityError as exc:
raise IntegrityError(exc)
return translate_exceptions_
|
def translate_exceptions(func):
@wraps(func)
async def wrapped(self, query, *args):
try:
return await func(self, query, *args)
except sqlite3.OperationalError as exc:
raise OperationalError(exc)
except sqlite3.IntegrityError as exc:
raise IntegrityError(exc)
return wrapped
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def rollback(self) -> None:
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
await self._connection.rollback()
self.release()
|
async def rollback(self) -> None:
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
self._finalized = True
await self._connection.rollback()
current_transaction_map[self.connection_name].set(self._old_context_value)
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
async def commit(self) -> None:
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
await self._connection.commit()
self.release()
|
async def commit(self) -> None:
if self._finalized:
raise TransactionManagementError("Transaction already finalised")
self._finalized = True
await self._connection.commit()
current_transaction_map[self.connection_name].set(self._old_context_value)
|
https://github.com/tortoise/tortoise-orm/issues/134
|
Traceback (most recent call last):
  File "/usr/lib/python3.6/asyncio/selector_events.py", line 714, in _read_ready
    data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 277, in create
    await instance.save(using_db=kwargs.get('using_db'))
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 221, in save
    await self._insert_instance(*args, **kwargs)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/models.py", line 210, in _insert_instance
    ).execute_insert(self)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/base/executor.py", line 77, in execute_insert
    instance.id = await self.db.execute_insert(query, values)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 22, in wrapped
    return await func(self, query, *args)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/tortoise/backends/asyncpg/client.py", line 108, in execute_insert
    stmt = await connection.prepare(query)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 403, in prepare
    return await self._prepare(query, timeout=timeout, use_cache=False)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 408, in _prepare
    use_cache=use_cache)
  File "/home/shlomy/PycharmProjects/playground/venv36/lib/python3.6/site-packages/asyncpg/connection.py", line 329, in _get_statement
    statement = await self._protocol.prepare(stmt_name, query, timeout)
  File "asyncpg/protocol/protocol.pyx", line 163, in prepare
asyncpg.exceptions.ConnectionDoesNotExistError: connection was closed in the middle of operation
|
ConnectionResetError
|
def render(self, request):
"""
        Render the resource. This will take over the transport underlying
the request, create a :class:`autobahn.twisted.websocket.WebSocketServerProtocol`
and let that do any subsequent communication.
"""
# for reasons unknown, the transport is already None when the
# request is over HTTP2. request.channel.getPeer() is valid at
# this point however
if request.channel.transport is None:
# render an "error, you're doing HTTPS over WSS" webpage
from autobahn.websocket import protocol
request.setResponseCode(426, b"Upgrade required")
# RFC says MUST set upgrade along with 426 code:
# https://tools.ietf.org/html/rfc7231#section-6.5.15
request.setHeader(b"Upgrade", b"WebSocket")
html = protocol._SERVER_STATUS_TEMPLATE % ("", protocol.__version__)
return html.encode("utf8")
# Create Autobahn WebSocket protocol.
#
protocol = self._factory.buildProtocol(request.transport.getPeer())
if not protocol:
# If protocol creation fails, we signal "internal server error"
request.setResponseCode(500)
return b""
# Take over the transport from Twisted Web
#
transport, request.channel.transport = request.channel.transport, None
# Connect the transport to our protocol. Once #3204 is fixed, there
# may be a cleaner way of doing this.
# http://twistedmatrix.com/trac/ticket/3204
#
if isinstance(transport, ProtocolWrapper):
# i.e. TLS is a wrapping protocol
transport.wrappedProtocol = protocol
else:
transport.protocol = protocol
protocol.makeConnection(transport)
# On Twisted 16+, the transport is paused whilst the existing
# request is served; there won't be any requests after us so
# we can just resume this ourselves.
# 17.1 version
if hasattr(transport, "_networkProducer"):
transport._networkProducer.resumeProducing()
# 16.x version
elif hasattr(transport, "resumeProducing"):
transport.resumeProducing()
# We recreate the request and forward the raw data. This is somewhat
# silly (since Twisted Web already did the HTTP request parsing
# which we will do a 2nd time), but it's totally non-invasive to our
# code. Maybe improve this.
#
if PY3:
data = request.method + b" " + request.uri + b" HTTP/1.1\x0d\x0a"
for h in request.requestHeaders.getAllRawHeaders():
data += h[0] + b": " + b",".join(h[1]) + b"\x0d\x0a"
data += b"\x0d\x0a"
data += request.content.read()
else:
data = "%s %s HTTP/1.1\x0d\x0a" % (request.method, request.uri)
for h in request.requestHeaders.getAllRawHeaders():
data += "%s: %s\x0d\x0a" % (h[0], ",".join(h[1]))
data += "\x0d\x0a"
protocol.dataReceived(data)
return NOT_DONE_YET
|
def render(self, request):
"""
        Render the resource. This will take over the transport underlying
the request, create a :class:`autobahn.twisted.websocket.WebSocketServerProtocol`
and let that do any subsequent communication.
"""
# Create Autobahn WebSocket protocol.
#
protocol = self._factory.buildProtocol(request.transport.getPeer())
if not protocol:
# If protocol creation fails, we signal "internal server error"
request.setResponseCode(500)
return b""
# Take over the transport from Twisted Web
#
transport, request.channel.transport = request.channel.transport, None
# Connect the transport to our protocol. Once #3204 is fixed, there
# may be a cleaner way of doing this.
# http://twistedmatrix.com/trac/ticket/3204
#
if isinstance(transport, ProtocolWrapper):
# i.e. TLS is a wrapping protocol
transport.wrappedProtocol = protocol
else:
transport.protocol = protocol
protocol.makeConnection(transport)
# On Twisted 16+, the transport is paused whilst the existing
# request is served; there won't be any requests after us so
# we can just resume this ourselves.
# 17.1 version
if hasattr(transport, "_networkProducer"):
transport._networkProducer.resumeProducing()
# 16.x version
elif hasattr(transport, "resumeProducing"):
transport.resumeProducing()
# We recreate the request and forward the raw data. This is somewhat
# silly (since Twisted Web already did the HTTP request parsing
# which we will do a 2nd time), but it's totally non-invasive to our
# code. Maybe improve this.
#
if PY3:
data = request.method + b" " + request.uri + b" HTTP/1.1\x0d\x0a"
for h in request.requestHeaders.getAllRawHeaders():
data += h[0] + b": " + b",".join(h[1]) + b"\x0d\x0a"
data += b"\x0d\x0a"
data += request.content.read()
else:
data = "%s %s HTTP/1.1\x0d\x0a" % (request.method, request.uri)
for h in request.requestHeaders.getAllRawHeaders():
data += "%s: %s\x0d\x0a" % (h[0], ",".join(h[1]))
data += "\x0d\x0a"
protocol.dataReceived(data)
return NOT_DONE_YET
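
The guard added at the top of `render()` can be exercised without Twisted: when the channel's transport is already gone (the HTTP/2 case), answer 426 Upgrade Required instead of dereferencing `None`. The request object below is a minimal stand-in, not Twisted's Request.

class FakeRequest:
    def __init__(self, transport):
        class Channel:
            pass
        self.channel = Channel()
        self.channel.transport = transport
        self.code = None
        self.headers = {}

    def setResponseCode(self, code, message=b""):
        self.code = code

    def setHeader(self, name, value):
        self.headers[name] = value

def render(request):
    if request.channel.transport is None:
        # HTTP/2: the transport was detached before render() ran
        request.setResponseCode(426, b"Upgrade required")
        # RFC 7231 section 6.5.15: a 426 MUST carry an Upgrade header
        request.setHeader(b"Upgrade", b"WebSocket")
        return b"<html>WebSocket upgrade required</html>"
    return b"...websocket takeover path..."

req = FakeRequest(transport=None)   # simulates the HTTP/2 case
print(render(req), req.code, req.headers)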
|
https://github.com/crossbario/autobahn-python/issues/1218
|
Traceback (most recent call last):
  File "/home/ubuntu/cpy373_1/lib/python3.7/site-packages/twisted/web/_http2.py", line 186, in dataReceived
    self._requestEnded(event)
  File "/home/ubuntu/cpy373_1/lib/python3.7/site-packages/twisted/web/_http2.py", line 495, in _requestEnded
    stream.requestComplete()
  File "/home/ubuntu/cpy373_1/lib/python3.7/site-packages/twisted/web/_http2.py", line 992, in requestComplete
    self._request.requestReceived(self.command, self.path, b'HTTP/2')
  File "/home/ubuntu/cpy373_1/lib/python3.7/site-packages/twisted/web/http.py", line 920, in requestReceived
    self.process()
--- <exception caught here> ---
  File "/home/ubuntu/cpy373_1/lib/python3.7/site-packages/twisted/web/server.py", line 199, in process
    self.render(resrc)
  File "/home/ubuntu/cpy373_1/lib/python3.7/site-packages/twisted/web/server.py", line 259, in render
    body = resrc.render(self)
  File "/home/ubuntu/scm/crossbario/autobahn-python/autobahn/twisted/resource.py", line 121, in render
    protocol = self._factory.buildProtocol(request.transport.getPeer())
builtins.AttributeError: 'NoneType' object has no attribute 'getPeer'
|
builtins.AttributeError
|
def _wrap_connection_future(self, transport, done, conn_f):
def on_connect_success(result):
# async connect call returns a 2-tuple
transport, proto = result
# in the case where we .abort() the transport / connection
# during setup, we still get on_connect_success but our
# transport is already closed (this will happen if
# e.g. there's an "open handshake timeout") -- I don't
# know if there's a "better" way to detect this? #python
# doesn't know of one, anyway
if transport.is_closing():
if not txaio.is_called(done):
reason = getattr(proto, "_onclose_reason", "Connection already closed")
txaio.reject(done, TransportLost(reason))
return
# if e.g. an SSL handshake fails, we will have
# successfully connected (i.e. get here) but need to
# 'listen' for the "connection_lost" from the underlying
# protocol in case of handshake failure .. so we wrap
# it. Also, we don't increment transport.success_count
# here on purpose (because we might not succeed).
# XXX double-check that asyncio behavior on TLS handshake
# failures is in fact as described above
orig = proto.connection_lost
@wraps(orig)
def lost(fail):
rtn = orig(fail)
if not txaio.is_called(done):
# asyncio will call connection_lost(None) in case of
# a transport failure, in which case we create an
# appropriate exception
if fail is None:
fail = TransportLost("failed to complete connection")
txaio.reject(done, fail)
return rtn
proto.connection_lost = lost
def on_connect_failure(err):
transport.connect_failures += 1
# failed to establish a connection in the first place
txaio.reject(done, err)
txaio.add_callbacks(conn_f, on_connect_success, None)
# the errback is added as a second step so it gets called if
        # there is an error in on_connect_success itself.
txaio.add_callbacks(conn_f, None, on_connect_failure)
return conn_f
|
def _wrap_connection_future(self, transport, done, conn_f):
def on_connect_success(result):
# async connect call returns a 2-tuple
transport, proto = result
# if e.g. an SSL handshake fails, we will have
# successfully connected (i.e. get here) but need to
# 'listen' for the "connection_lost" from the underlying
# protocol in case of handshake failure .. so we wrap
# it. Also, we don't increment transport.success_count
# here on purpose (because we might not succeed).
# XXX double-check that asyncio behavior on TLS handshake
# failures is in fact as described above
orig = proto.connection_lost
@wraps(orig)
def lost(fail):
rtn = orig(fail)
if not txaio.is_called(done):
# asyncio will call connection_lost(None) in case of
# a transport failure, in which case we create an
# appropriate exception
if fail is None:
fail = TransportLost("failed to complete connection")
txaio.reject(done, fail)
return rtn
proto.connection_lost = lost
def on_connect_failure(err):
transport.connect_failures += 1
# failed to establish a connection in the first place
txaio.reject(done, err)
txaio.add_callbacks(conn_f, on_connect_success, None)
# the errback is added as a second step so it gets called if
        # there is an error in on_connect_success itself.
txaio.add_callbacks(conn_f, None, on_connect_failure)
return conn_f
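
The essential addition in the fixed version is the `is_closing()` check: a connect that was aborted during setup (for example by an open-handshake timeout) still resolves "successfully", so the success callback must reject the pending future itself. A stand-in sketch (not the autobahn classes):

class FakeTransport:
    def __init__(self, closing):
        self._closing = closing

    def is_closing(self):
        return self._closing

class FakeProto:
    _onclose_reason = "open handshake timeout"

def on_connect_success(result, reject):
    transport, proto = result
    if transport.is_closing():
        # connect "succeeded" but the transport was already aborted:
        # fail the caller's future instead of wiring up a dead protocol
        reject(getattr(proto, "_onclose_reason", "Connection already closed"))
        return
    print("connection usable")

on_connect_success((FakeTransport(closing=True), FakeProto()), reject=print)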
|
https://github.com/crossbario/autobahn-python/issues/1153
|
2019-03-25 15:29:00.597 13776 DEBUG autobahn.asyncio.component.Component [-] component failed: TransportLost: failed to complete connection _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.609 13776 DEBUG autobahn.asyncio.component.Component [-] Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
autobahn.wamp.exception.TransportLost: failed to complete connection
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.615 13776 ERROR autobahn.asyncio.component.Component [-] Connection failed: TransportLost: failed to complete connection: autobahn.wamp.exception.TransportLost: failed to complete connection
2019-03-25 15:29:00.624 13776 DEBUG autobahn.asyncio.component.Component [-] Entering re-connect loop _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.631 13776 DEBUG autobahn.asyncio.component.Component [-] trying transport 0 using connect delay 25.279705208019543 _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:25.946 13776 INFO autobahn.asyncio.component.Component [-] connecting once using transport type "websocket" over endpoint "tcp"
2019-03-25 15:29:29.403 13776 DEBUG autobahn.asyncio.component.Component [-] component failed: ConnectionResetError: [Errno 104] Connection reset by peer _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.448 13776 DEBUG autobahn.asyncio.component.Component [-] Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 239, in _step
result = coro.send(None)
File "/usr/lib/python3.5/asyncio/tasks.py", line 400, in wait_for
return fut.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 241, in _step
result = coro.throw(exc)
File "/usr/lib/python3.5/asyncio/base_events.py", line 801, in create_connection
sock, protocol_factory, ssl, server_hostname)
File "/usr/lib/python3.5/asyncio/base_events.py", line 827, in _create_connection_transport
yield from waiter
File "/usr/lib/python3.5/asyncio/futures.py", line 380, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.5/asyncio/tasks.py", line 304, in _wakeup
future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/selector_events.py", line 723, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.454 13776 INFO autobahn.asyncio.component.Component [-] Connection failed: ConnectionResetError: [Errno 104] Connection reset by peer
2019-03-25 15:29:29.471 13776 DEBUG autobahn.asyncio.component.Component [-] Entering re-connect loop _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.480 13776 DEBUG autobahn.asyncio.component.Component [-] trying transport 0 using connect delay 34.52392114857189 _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:04.060 13776 INFO autobahn.asyncio.component.Component [-] connecting once using transport type "websocket" over endpoint "tcp"
2019-03-25 15:30:08.150 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-]
[('logOctets', False, 'WampWebSocketClientFactory'),
('logFrames', False, 'WampWebSocketClientFactory'),
('trackTimings', False, 'WampWebSocketClientFactory'),
('utf8validateIncoming', True, 'WampWebSocketClientFactory'),
('applyMask', True, 'WampWebSocketClientFactory'),
('maxFramePayloadSize', 0, 'WampWebSocketClientFactory'),
('maxMessagePayloadSize', 0, 'WampWebSocketClientFactory'),
('autoFragmentSize', 0, 'WampWebSocketClientFactory'),
('failByDrop', True, 'WampWebSocketClientFactory'),
('echoCloseCodeReason', False, 'WampWebSocketClientFactory'),
('openHandshakeTimeout', 5, 'WampWebSocketClientFactory'),
('closeHandshakeTimeout', 1, 'WampWebSocketClientFactory'),
('tcpNoDelay', True, 'WampWebSocketClientFactory'),
('autoPingInterval', 0, 'WampWebSocketClientFactory'),
('autoPingTimeout', 0, 'WampWebSocketClientFactory'),
('autoPingSize', 4, 'WampWebSocketClientFactory'),
('version', 18, 'WampWebSocketClientFactory'),
('acceptMaskedServerFrames', False, 'WampWebSocketClientFactory'),
('maskClientFrames', True, 'WampWebSocketClientFactory'),
('serverConnectionDropTimeout', 1, 'WampWebSocketClientFactory'),
('perMessageCompressionOffers', [], 'WampWebSocketClientFactory'),
('perMessageCompressionAccept',
<function WebSocketClientFactory.resetProtocolOptions.<locals>.<lambda> at 0xb3d51300>,
'WampWebSocketClientFactory')] _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.163 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] connection to tcp:51.75.29.206:8181 established _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.180 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] GET / HTTP/1.1
User-Agent: AutobahnPython/19.3.3
Host: <OUR-CROSSBAR-SERVER-IP>:8181
Upgrade: WebSocket
Connection: Upgrade
Pragma: no-cache
Cache-Control: no-cache
Sec-WebSocket-Key: 8eiSBL+/0+X97Hz9R5+JhQ==
Sec-WebSocket-Protocol: wamp.2.json
Sec-WebSocket-Version: 13
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.191 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] _connectionLost: [Errno 104] Connection reset by peer _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:12.311 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] skipping opening handshake timeout: WebSocket connection is already closed _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
|
ConnectionResetError
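
Annotation (not part of the dataset row): the reconnect loop in the log above is driven by Autobahn's Component machinery. A hedged sketch of the kind of client setup that produces such logs, assuming the standard autobahn.asyncio Component interface; the host, port, and realm below are placeholders, not values recovered from the issue:

from autobahn.asyncio.component import Component, run

# Component retries failed connects with a randomized, growing delay,
# which is what produces the "trying transport 0 using connect delay
# 25.27..." lines in the log above.
component = Component(
    transports=[{
        "type": "websocket",
        "url": "ws://<OUR-CROSSBAR-SERVER-IP>:8181/",   # placeholder host
        "endpoint": {"type": "tcp",
                     "host": "<OUR-CROSSBAR-SERVER-IP>",  # placeholder
                     "port": 8181},
    }],
    realm="realm1",  # assumed realm name
)

run([component])  # blocks, driving the connect/re-connect loop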
|
def on_connect_success(result):
# async connect call returns a 2-tuple
transport, proto = result
# in the case where we .abort() the transport / connection
# during setup, we still get on_connect_success but our
# transport is already closed (this will happen if
# e.g. there's an "open handshake timeout") -- I don't
# know if there's a "better" way to detect this? #python
# doesn't know of one, anyway
if transport.is_closing():
if not txaio.is_called(done):
reason = getattr(proto, "_onclose_reason", "Connection already closed")
txaio.reject(done, TransportLost(reason))
return
# if e.g. an SSL handshake fails, we will have
# successfully connected (i.e. get here) but need to
# 'listen' for the "connection_lost" from the underlying
# protocol in case of handshake failure .. so we wrap
# it. Also, we don't increment transport.success_count
# here on purpose (because we might not succeed).
# XXX double-check that asyncio behavior on TLS handshake
# failures is in fact as described above
orig = proto.connection_lost
@wraps(orig)
def lost(fail):
rtn = orig(fail)
if not txaio.is_called(done):
# asyncio will call connection_lost(None) in case of
# a transport failure, in which case we create an
# appropriate exception
if fail is None:
fail = TransportLost("failed to complete connection")
txaio.reject(done, fail)
return rtn
proto.connection_lost = lost
|
def on_connect_success(result):
# async connect call returns a 2-tuple
transport, proto = result
# if e.g. an SSL handshake fails, we will have
# successfully connected (i.e. get here) but need to
# 'listen' for the "connection_lost" from the underlying
# protocol in case of handshake failure .. so we wrap
# it. Also, we don't increment transport.success_count
# here on purpose (because we might not succeed).
# XXX double-check that asyncio behavior on TLS handshake
# failures is in fact as described above
orig = proto.connection_lost
@wraps(orig)
def lost(fail):
rtn = orig(fail)
if not txaio.is_called(done):
# asyncio will call connection_lost(None) in case of
# a transport failure, in which case we create an
# appropriate exception
if fail is None:
fail = TransportLost("failed to complete connection")
txaio.reject(done, fail)
return rtn
proto.connection_lost = lost
|
https://github.com/crossbario/autobahn-python/issues/1153
|
2019-03-25 15:29:00.597 13776 DEBUG autobahn.asyncio.component.Component [-] component failed: TransportLost: failed to complete connection _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.609 13776 DEBUG autobahn.asyncio.component.Component [-] Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
autobahn.wamp.exception.TransportLost: failed to complete connection
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.615 13776 ERROR autobahn.asyncio.component.Component [-] Connection failed: TransportLost: failed to complete connection: autobahn.wamp.exception.TransportLost: failed to complete connection
2019-03-25 15:29:00.624 13776 DEBUG autobahn.asyncio.component.Component [-] Entering re-connect loop _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.631 13776 DEBUG autobahn.asyncio.component.Component [-] trying transport 0 using connect delay 25.279705208019543 _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:25.946 13776 INFO autobahn.asyncio.component.Component [-] connecting once using transport type "websocket" over endpoint "tcp"
2019-03-25 15:29:29.403 13776 DEBUG autobahn.asyncio.component.Component [-] component failed: ConnectionResetError: [Errno 104] Connection reset by peer _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.448 13776 DEBUG autobahn.asyncio.component.Component [-] Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 239, in _step
result = coro.send(None)
File "/usr/lib/python3.5/asyncio/tasks.py", line 400, in wait_for
return fut.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 241, in _step
result = coro.throw(exc)
File "/usr/lib/python3.5/asyncio/base_events.py", line 801, in create_connection
sock, protocol_factory, ssl, server_hostname)
File "/usr/lib/python3.5/asyncio/base_events.py", line 827, in _create_connection_transport
yield from waiter
File "/usr/lib/python3.5/asyncio/futures.py", line 380, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.5/asyncio/tasks.py", line 304, in _wakeup
future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/selector_events.py", line 723, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.454 13776 INFO autobahn.asyncio.component.Component [-] Connection failed: ConnectionResetError: [Errno 104] Connection reset by peer
2019-03-25 15:29:29.471 13776 DEBUG autobahn.asyncio.component.Component [-] Entering re-connect loop _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.480 13776 DEBUG autobahn.asyncio.component.Component [-] trying transport 0 using connect delay 34.52392114857189 _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:04.060 13776 INFO autobahn.asyncio.component.Component [-] connecting once using transport type "websocket" over endpoint "tcp"
2019-03-25 15:30:08.150 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-]
[('logOctets', False, 'WampWebSocketClientFactory'),
('logFrames', False, 'WampWebSocketClientFactory'),
('trackTimings', False, 'WampWebSocketClientFactory'),
('utf8validateIncoming', True, 'WampWebSocketClientFactory'),
('applyMask', True, 'WampWebSocketClientFactory'),
('maxFramePayloadSize', 0, 'WampWebSocketClientFactory'),
('maxMessagePayloadSize', 0, 'WampWebSocketClientFactory'),
('autoFragmentSize', 0, 'WampWebSocketClientFactory'),
('failByDrop', True, 'WampWebSocketClientFactory'),
('echoCloseCodeReason', False, 'WampWebSocketClientFactory'),
('openHandshakeTimeout', 5, 'WampWebSocketClientFactory'),
('closeHandshakeTimeout', 1, 'WampWebSocketClientFactory'),
('tcpNoDelay', True, 'WampWebSocketClientFactory'),
('autoPingInterval', 0, 'WampWebSocketClientFactory'),
('autoPingTimeout', 0, 'WampWebSocketClientFactory'),
('autoPingSize', 4, 'WampWebSocketClientFactory'),
('version', 18, 'WampWebSocketClientFactory'),
('acceptMaskedServerFrames', False, 'WampWebSocketClientFactory'),
('maskClientFrames', True, 'WampWebSocketClientFactory'),
('serverConnectionDropTimeout', 1, 'WampWebSocketClientFactory'),
('perMessageCompressionOffers', [], 'WampWebSocketClientFactory'),
('perMessageCompressionAccept',
<function WebSocketClientFactory.resetProtocolOptions.<locals>.<lambda> at 0xb3d51300>,
'WampWebSocketClientFactory')] _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.163 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] connection to tcp:51.75.29.206:8181 established _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.180 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] GET / HTTP/1.1
User-Agent: AutobahnPython/19.3.3
Host: <OUR-CROSSBAR-SERVER-IP>:8181
Upgrade: WebSocket
Connection: Upgrade
Pragma: no-cache
Cache-Control: no-cache
Sec-WebSocket-Key: 8eiSBL+/0+X97Hz9R5+JhQ==
Sec-WebSocket-Protocol: wamp.2.json
Sec-WebSocket-Version: 13
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.191 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] _connectionLost: [Errno 104] Connection reset by peer _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:12.311 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] skipping opening handshake timeout: WebSocket connection is already closed _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
|
ConnectionResetError
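
Annotation (not part of the dataset row): the merged on_connect_success above adds a guard for a transport that was aborted while the connection was still being set up. A minimal plain-asyncio sketch of the same guard, with nothing Autobahn-specific; connect_once is a hypothetical helper, not library API:

import asyncio

async def connect_once(host, port):
    loop = asyncio.get_event_loop()
    # create_connection() can resolve successfully even though the
    # transport was aborted during setup (e.g. an opening-handshake
    # timeout fired in the meantime); is_closing() detects that case.
    transport, proto = await loop.create_connection(
        asyncio.Protocol, host, port)
    if transport.is_closing():
        # Surface the failure instead of handing back a dead transport,
        # mirroring the txaio.reject(done, TransportLost(...)) above.
        raise ConnectionError("transport closed during setup")
    return transport, proto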
|
def onClose(self, wasClean, code, reason):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onClose`
"""
# WAMP session might never have been established in the first place .. guard this!
self._onclose_reason = reason
if self._session is not None:
# WebSocket connection lost - fire off the WAMP
# session close callback
# noinspection PyBroadException
try:
self.log.debug(
'WAMP-over-WebSocket transport lost: wasClean={wasClean}, code={code}, reason="{reason}"',
wasClean=wasClean,
code=code,
reason=reason,
)
self._session.onClose(wasClean)
except Exception:
self.log.critical("{tb}", tb=traceback.format_exc())
self._session = None
|
def onClose(self, wasClean, code, reason):
"""
Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onClose`
"""
# WAMP session might never have been established in the first place .. guard this!
if self._session is not None:
# WebSocket connection lost - fire off the WAMP
# session close callback
# noinspection PyBroadException
try:
self.log.debug(
'WAMP-over-WebSocket transport lost: wasClean={wasClean}, code={code}, reason="{reason}"',
wasClean=wasClean,
code=code,
reason=reason,
)
self._session.onClose(wasClean)
except Exception:
self.log.critical("{tb}", tb=traceback.format_exc())
self._session = None
|
https://github.com/crossbario/autobahn-python/issues/1153
|
2019-03-25 15:29:00.597 13776 DEBUG autobahn.asyncio.component.Component [-] component failed: TransportLost: failed to complete connection _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.609 13776 DEBUG autobahn.asyncio.component.Component [-] Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
autobahn.wamp.exception.TransportLost: failed to complete connection
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.615 13776 ERROR autobahn.asyncio.component.Component [-] Connection failed: TransportLost: failed to complete connection: autobahn.wamp.exception.TransportLost: failed to complete connection
2019-03-25 15:29:00.624 13776 DEBUG autobahn.asyncio.component.Component [-] Entering re-connect loop _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:00.631 13776 DEBUG autobahn.asyncio.component.Component [-] trying transport 0 using connect delay 25.279705208019543 _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:25.946 13776 INFO autobahn.asyncio.component.Component [-] connecting once using transport type "websocket" over endpoint "tcp"
2019-03-25 15:29:29.403 13776 DEBUG autobahn.asyncio.component.Component [-] component failed: ConnectionResetError: [Errno 104] Connection reset by peer _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.448 13776 DEBUG autobahn.asyncio.component.Component [-] Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/local/lib/python3.5/dist-packages/txaio/aio.py", line 514, in done
res = f.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 239, in _step
result = coro.send(None)
File "/usr/lib/python3.5/asyncio/tasks.py", line 400, in wait_for
return fut.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 241, in _step
result = coro.throw(exc)
File "/usr/lib/python3.5/asyncio/base_events.py", line 801, in create_connection
sock, protocol_factory, ssl, server_hostname)
File "/usr/lib/python3.5/asyncio/base_events.py", line 827, in _create_connection_transport
yield from waiter
File "/usr/lib/python3.5/asyncio/futures.py", line 380, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.5/asyncio/tasks.py", line 304, in _wakeup
future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 293, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/selector_events.py", line 723, in _read_ready
data = self._sock.recv(self.max_size)
ConnectionResetError: [Errno 104] Connection reset by peer
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.454 13776 INFO autobahn.asyncio.component.Component [-] Connection failed: ConnectionResetError: [Errno 104] Connection reset by peer
2019-03-25 15:29:29.471 13776 DEBUG autobahn.asyncio.component.Component [-] Entering re-connect loop _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:29:29.480 13776 DEBUG autobahn.asyncio.component.Component [-] trying transport 0 using connect delay 34.52392114857189 _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:04.060 13776 INFO autobahn.asyncio.component.Component [-] connecting once using transport type "websocket" over endpoint "tcp"
2019-03-25 15:30:08.150 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-]
[('logOctets', False, 'WampWebSocketClientFactory'),
('logFrames', False, 'WampWebSocketClientFactory'),
('trackTimings', False, 'WampWebSocketClientFactory'),
('utf8validateIncoming', True, 'WampWebSocketClientFactory'),
('applyMask', True, 'WampWebSocketClientFactory'),
('maxFramePayloadSize', 0, 'WampWebSocketClientFactory'),
('maxMessagePayloadSize', 0, 'WampWebSocketClientFactory'),
('autoFragmentSize', 0, 'WampWebSocketClientFactory'),
('failByDrop', True, 'WampWebSocketClientFactory'),
('echoCloseCodeReason', False, 'WampWebSocketClientFactory'),
('openHandshakeTimeout', 5, 'WampWebSocketClientFactory'),
('closeHandshakeTimeout', 1, 'WampWebSocketClientFactory'),
('tcpNoDelay', True, 'WampWebSocketClientFactory'),
('autoPingInterval', 0, 'WampWebSocketClientFactory'),
('autoPingTimeout', 0, 'WampWebSocketClientFactory'),
('autoPingSize', 4, 'WampWebSocketClientFactory'),
('version', 18, 'WampWebSocketClientFactory'),
('acceptMaskedServerFrames', False, 'WampWebSocketClientFactory'),
('maskClientFrames', True, 'WampWebSocketClientFactory'),
('serverConnectionDropTimeout', 1, 'WampWebSocketClientFactory'),
('perMessageCompressionOffers', [], 'WampWebSocketClientFactory'),
('perMessageCompressionAccept',
<function WebSocketClientFactory.resetProtocolOptions.<locals>.<lambda> at 0xb3d51300>,
'WampWebSocketClientFactory')] _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.163 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] connection to tcp:51.75.29.206:8181 established _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.180 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] GET / HTTP/1.1
User-Agent: AutobahnPython/19.3.3
Host: <OUR-CROSSBAR-SERVER-IP>:8181
Upgrade: WebSocket
Connection: Upgrade
Pragma: no-cache
Cache-Control: no-cache
Sec-WebSocket-Key: 8eiSBL+/0+X97Hz9R5+JhQ==
Sec-WebSocket-Protocol: wamp.2.json
Sec-WebSocket-Version: 13
_log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:08.191 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] _connectionLost: [Errno 104] Connection reset by peer _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
2019-03-25 15:30:12.311 13776 DEBUG autobahn.asyncio.websocket.WebSocketClientProtocol [-] skipping opening handshake timeout: WebSocket connection is already closed _log /usr/local/lib/python3.5/dist-packages/txaio/aio.py:201
|
ConnectionResetError
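
Annotation (not part of the dataset row): the onClose change above is the other half of the same fix — it stashes the close reason on the protocol so the connect-side guard can report it later. A minimal sketch of that pattern on a plain asyncio.Protocol; the class name and the attribute value format are illustrative, not Autobahn's:

import asyncio

class RecordingProtocol(asyncio.Protocol):
    def __init__(self):
        self._onclose_reason = None  # read later, as in the fix above

    def connection_lost(self, exc):
        # Record *why* the connection went away so later error paths
        # can raise something more useful than "already closed".
        self._onclose_reason = repr(exc) if exc else "closed cleanly"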
|
def startProxyConnect(self):
"""
Connect to explicit proxy.
"""
# construct proxy connect HTTP request
#
request = b"CONNECT %s:%d HTTP/1.1\x0d\x0a" % (
self.factory.host.encode("utf-8"),
self.factory.port,
)
request += b"Host: %s:%d\x0d\x0a" % (
self.factory.host.encode("utf-8"),
self.factory.port,
)
request += b"\x0d\x0a"
self.log.debug("{request}", request=request)
self.sendData(request)
|
def startProxyConnect(self):
"""
Connect to explicit proxy.
"""
# construct proxy connect HTTP request
#
request = "CONNECT %s:%d HTTP/1.1\x0d\x0a" % (
self.factory.host.encode("utf-8"),
self.factory.port,
)
request += "Host: %s:%d\x0d\x0a" % (
self.factory.host.encode("utf-8"),
self.factory.port,
)
request += "\x0d\x0a"
self.log.debug("{request}", request=request)
self.sendData(request)
|
https://github.com/crossbario/autobahn-python/issues/892
|
2017-09-12T14:19:58+0200 Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/usr/local/lib/python3.5/dist-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/usr/local/lib/python3.5/dist-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/usr/local/lib/python3.5/dist-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.5/dist-packages/twisted/internet/asyncioreactor.py", line 136, in _readOrWrite
why = method()
File "/usr/local/lib/python3.5/dist-packages/twisted/internet/tcp.py", line 586, in doConnect
self._connectDone()
File "/usr/local/lib/python3.5/dist-packages/twisted/internet/tcp.py", line 615, in _connectDone
self.protocol.makeConnection(self)
File "/usr/local/lib/python3.5/dist-packages/twisted/internet/protocol.py", line 494, in makeConnection
self.connectionMade()
File "/usr/local/lib/python3.5/dist-packages/autobahn/twisted/websocket.py", line 94, in connectionMade
self._connectionMade()
File "/usr/local/lib/python3.5/dist-packages/autobahn/websocket/protocol.py", line 3328, in _connectionMade
self.startProxyConnect()
File "/usr/local/lib/python3.5/dist-packages/autobahn/websocket/protocol.py", line 3354, in startProxyConnect
self.sendData(request)
File "/usr/local/lib/python3.5/dist-packages/autobahn/websocket/protocol.py", line 1297, in sendData
self.transport.write(data)
File "/usr/local/lib/python3.5/dist-packages/twisted/internet/_newtls.py", line 191, in write
FileDescriptor.write(self, bytes)
File "/usr/local/lib/python3.5/dist-packages/twisted/internet/abstract.py", line 348, in write
raise TypeError("Data must not be unicode")
builtins.TypeError: Data must not be unicode
|
builtins.TypeError
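
Annotation (not part of the dataset row): the TypeError above is Twisted refusing str where bytes are required — transport.write() only accepts bytes. A minimal sketch of the distinction the merge fixes, runnable on Python 3.5+ (bytes %-formatting is PEP 461); the host and port are illustrative:

host, port = "proxy.example.com", 8080  # made-up values

# Pre-fix: str formatting yields a str (even with host.encode() inside),
# and transport.write(str) raises TypeError("Data must not be unicode").
wrong = "CONNECT %s:%d HTTP/1.1\x0d\x0a" % (host, port)

# Post-fix: byte-string formatting yields bytes, which transports accept.
right = b"CONNECT %s:%d HTTP/1.1\x0d\x0a" % (host.encode("utf-8"), port)

assert isinstance(wrong, str) and isinstance(right, bytes)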
|
def as_view(cls, actions=None, **initkwargs):
"""
Because of the way class based views create a closure around the
instantiated view, we need to totally reimplement `.as_view`,
and slightly modify the view function that is created and returned.
"""
# The suffix initkwarg is reserved for identifying the viewset type
# eg. 'List' or 'Instance'.
cls.suffix = None
# actions must not be empty
if not actions:
raise TypeError(
"The `actions` argument must be provided when "
"calling `.as_view()` on a ViewSet. For example "
"`.as_view({'get': 'list'})`"
)
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError(
"You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that." % (key, cls.__name__)
)
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r" % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
# We also store the mapping of request methods to actions,
# so that we can later set the action attribute.
# eg. `self.action = 'list'` on an incoming GET request.
self.action_map = actions
# Bind methods to actions
# This is the bit that's different to a standard view
for method, action in actions.items():
handler = getattr(self, action)
setattr(self, method, handler)
if hasattr(self, "get") and not hasattr(self, "head"):
self.head = self.get
# And continue as usual
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
# We need to set these on the view function, so that breadcrumb
# generation can pick out these bits of information from a
# resolved URL.
view.cls = cls
view.initkwargs = initkwargs
view.suffix = initkwargs.get("suffix", None)
view.actions = actions
return csrf_exempt(view)
|
def as_view(cls, actions=None, **initkwargs):
"""
Because of the way class based views create a closure around the
instantiated view, we need to totally reimplement `.as_view`,
and slightly modify the view function that is created and returned.
"""
# The suffix initkwarg is reserved for identifying the viewset type
# eg. 'List' or 'Instance'.
cls.suffix = None
# actions must not be empty
if not actions:
raise TypeError(
"The `actions` argument must be provided when "
"calling `.as_view()` on a ViewSet. For example "
"`.as_view({'get': 'list'})`"
)
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError(
"You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that." % (key, cls.__name__)
)
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r" % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
# We also store the mapping of request methods to actions,
# so that we can later set the action attribute.
# eg. `self.action = 'list'` on an incoming GET request.
self.action_map = actions
# Bind methods to actions
# This is the bit that's different to a standard view
for method, action in actions.items():
handler = getattr(self, action)
setattr(self, method, handler)
# And continue as usual
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
# We need to set these on the view function, so that breadcrumb
# generation can pick out these bits of information from a
# resolved URL.
view.cls = cls
view.initkwargs = initkwargs
view.suffix = initkwargs.get("suffix", None)
view.actions = actions
return csrf_exempt(view)
|
https://github.com/encode/django-rest-framework/issues/4864
|
pip show djangorestframework
---
Name: djangorestframework
Version: 3.5.3
Location: /home/matwey/temp/venv/lib/python3.4/site-packages
Requires:
(venv)matwey@epsilon:~/temp/drf_test> python runtests.py
Creating test database for alias 'default' ('file:memorydb_default?mode=memory&cache=shared')...
Operations to perform:
Synchronize unmigrated apps: rest_framework, theapp
Apply all migrations: auth, authtoken, contenttypes
Synchronizing apps without migrations:
Creating tables...
Creating table theapp_model
Running deferred SQL...
Running migrations:
Applying contenttypes.0001_initial... OK
Applying contenttypes.0002_remove_content_type_name... OK
Applying auth.0001_initial... OK
Applying auth.0002_alter_permission_name_max_length... OK
Applying auth.0003_alter_user_email_max_length... OK
Applying auth.0004_alter_user_username_opts... OK
Applying auth.0005_alter_user_last_login_null... OK
Applying auth.0006_require_contenttypes_0002... OK
Applying auth.0007_alter_validators_add_error_messages... OK
Applying auth.0008_alter_user_username_max_length... OK
Applying authtoken.0001_initial... OK
Applying authtoken.0002_auto_20160226_1747... OK
test_model_head1 (tests.test.ModelTest) ... FAIL
======================================================================
FAIL: test_model_head1 (tests.test.ModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/matwey/temp/drf_test/tests/test.py", line 18, in test_model_head1
self.assertEqual(response.status_code, status.HTTP_200_OK)
AssertionError: 405 != 200
----------------------------------------------------------------------
Ran 1 test in 0.083s
FAILED (failures=1)
Destroying test database for alias 'default' ('file:memorydb_default?mode=memory&cache=shared')...
|
AssertionError
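
Annotation (not part of the dataset row): the 405 in the test run above is a HEAD request finding no handler. HTTP semantics say HEAD must behave like GET without a response body, which is the rule the hasattr fallback in the merged as_view restores. A minimal sketch of that fallback outside DRF; Handler is a made-up stand-in for a view instance:

class Handler:
    def get(self, request):
        return "body"

h = Handler()
# The line the merge adds, in isolation: alias HEAD to the GET handler
# when only GET was mapped, so HEAD dispatches instead of returning 405.
if hasattr(h, "get") and not hasattr(h, "head"):
    h.head = h.get

assert h.head is h.get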
|
def view(request, *args, **kwargs):
self = cls(**initkwargs)
# We also store the mapping of request methods to actions,
# so that we can later set the action attribute.
# eg. `self.action = 'list'` on an incoming GET request.
self.action_map = actions
# Bind methods to actions
# This is the bit that's different to a standard view
for method, action in actions.items():
handler = getattr(self, action)
setattr(self, method, handler)
if hasattr(self, "get") and not hasattr(self, "head"):
self.head = self.get
# And continue as usual
return self.dispatch(request, *args, **kwargs)
|
def view(request, *args, **kwargs):
self = cls(**initkwargs)
# We also store the mapping of request methods to actions,
# so that we can later set the action attribute.
# eg. `self.action = 'list'` on an incoming GET request.
self.action_map = actions
# Bind methods to actions
# This is the bit that's different to a standard view
for method, action in actions.items():
handler = getattr(self, action)
setattr(self, method, handler)
# And continue as usual
return self.dispatch(request, *args, **kwargs)
|
https://github.com/encode/django-rest-framework/issues/4864
|
pip show djangorestframework
---
Name: djangorestframework
Version: 3.5.3
Location: /home/matwey/temp/venv/lib/python3.4/site-packages
Requires:
(venv)matwey@epsilon:~/temp/drf_test> python runtests.py
Creating test database for alias 'default' ('file:memorydb_default?mode=memory&cache=shared')...
Operations to perform:
Synchronize unmigrated apps: rest_framework, theapp
Apply all migrations: auth, authtoken, contenttypes
Synchronizing apps without migrations:
Creating tables...
Creating table theapp_model
Running deferred SQL...
Running migrations:
Applying contenttypes.0001_initial... OK
Applying contenttypes.0002_remove_content_type_name... OK
Applying auth.0001_initial... OK
Applying auth.0002_alter_permission_name_max_length... OK
Applying auth.0003_alter_user_email_max_length... OK
Applying auth.0004_alter_user_username_opts... OK
Applying auth.0005_alter_user_last_login_null... OK
Applying auth.0006_require_contenttypes_0002... OK
Applying auth.0007_alter_validators_add_error_messages... OK
Applying auth.0008_alter_user_username_max_length... OK
Applying authtoken.0001_initial... OK
Applying authtoken.0002_auto_20160226_1747... OK
test_model_head1 (tests.test.ModelTest) ... FAIL
======================================================================
FAIL: test_model_head1 (tests.test.ModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/matwey/temp/drf_test/tests/test.py", line 18, in test_model_head1
self.assertEqual(response.status_code, status.HTTP_200_OK)
AssertionError: 405 != 200
----------------------------------------------------------------------
Ran 1 test in 0.083s
FAILED (failures=1)
Destroying test database for alias 'default' ('file:memorydb_default?mode=memory&cache=shared')...
|
AssertionError
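
Annotation (not part of the dataset row): this row isolates the same change to the view closure itself. The closure binds handlers per request from the actions map, so only the methods named there exist on the instance; a small sketch of that binding, with made-up names, showing why HEAD needs the explicit alias:

actions = {"get": "list"}  # as in .as_view({'get': 'list'})

class FakeViewSet:
    def list(self, request):
        return "items"

self_ = FakeViewSet()
# Mirror the loop in view(): map HTTP method names to action handlers.
for method, action in actions.items():
    setattr(self_, method, getattr(self_, action))

# Without the merged alias, hasattr(self_, "head") is False and the
# dispatcher answers 405; with it, HEAD reuses the GET handler.
if hasattr(self_, "get") and not hasattr(self_, "head"):
    self_.head = self_.get

assert self_.head(None) == "items"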
|