after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _consolidate(blocks, items):
"""
Merge blocks having same dtype
"""
get_dtype = lambda x: x.dtype.name
# sort by dtype
grouper = itertools.groupby(sorted(blocks, key=get_dtype), lambda x: x.dtype)
new_blocks = []
for dtype, group_blocks in grouper:
new_block = _merge_blocks(list(group_blocks), items)
new_blocks.append(new_block)
return new_blocks
|
def _consolidate(blocks, items):
"""
Merge blocks having same dtype
"""
get_dtype = lambda x: x.dtype
# sort by dtype
grouper = itertools.groupby(sorted(blocks, key=get_dtype), lambda x: x.dtype)
new_blocks = []
for dtype, group_blocks in grouper:
new_block = _merge_blocks(list(group_blocks), items)
new_blocks.append(new_block)
return new_blocks
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _nanvar(values, axis=None, skipna=True, ddof=1):
mask = isnull(values)
if axis is not None:
count = (values.shape[axis] - mask.sum(axis)).astype(float)
else:
count = float(values.size - mask.sum())
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
X = _ensure_numeric(values.sum(axis))
XX = _ensure_numeric((values**2).sum(axis))
return np.fabs((XX - X**2 / count) / (count - ddof))
|
def _nanvar(values, axis=None, skipna=True, ddof=1):
mask = isnull(values)
if axis is not None:
count = (values.shape[axis] - mask.sum(axis)).astype(float)
else:
count = float(values.size - mask.sum())
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
X = _ensure_numeric(values.sum(axis))
XX = _ensure_numeric((values**2).sum(axis))
return (XX - X**2 / count) / (count - ddof)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _nanmin(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type, (np.integer, np.datetime64)):
values = values.copy()
np.putmask(values, mask, np.inf)
# numpy 1.6.1 workaround in Python 3.x
if values.dtype == np.object_ and sys.version_info[0] >= 3: # pragma: no cover
import __builtin__
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(__builtin__.min, apply_ax, values)
else:
result = __builtin__.min(values)
else:
result = values.min(axis)
return _maybe_null_out(result, axis, mask)
|
def _nanmin(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type, np.integer):
values = values.copy()
np.putmask(values, mask, np.inf)
# numpy 1.6.1 workaround in Python 3.x
if values.dtype == np.object_ and sys.version_info[0] >= 3: # pragma: no cover
import __builtin__
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(__builtin__.min, apply_ax, values)
else:
result = __builtin__.min(values)
else:
result = values.min(axis)
return _maybe_null_out(result, axis, mask)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _nanmax(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type, (np.integer, np.datetime64)):
values = values.copy()
np.putmask(values, mask, -np.inf)
# numpy 1.6.1 workaround in Python 3.x
if values.dtype == np.object_ and sys.version_info[0] >= 3: # pragma: no cover
import __builtin__
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(__builtin__.max, apply_ax, values)
else:
result = __builtin__.max(values)
else:
result = values.max(axis)
return _maybe_null_out(result, axis, mask)
|
def _nanmax(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not issubclass(values.dtype.type, np.integer):
values = values.copy()
np.putmask(values, mask, -np.inf)
# numpy 1.6.1 workaround in Python 3.x
if values.dtype == np.object_ and sys.version_info[0] >= 3: # pragma: no cover
import __builtin__
if values.ndim > 1:
apply_ax = axis if axis is not None else 0
result = np.apply_along_axis(__builtin__.max, apply_ax, values)
else:
result = __builtin__.max(values)
else:
result = values.max(axis)
return _maybe_null_out(result, axis, mask)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def unique1d(values):
"""
Hash table-based unique
"""
if np.issubdtype(values.dtype, np.floating):
table = lib.Float64HashTable(len(values))
uniques = np.array(table.unique(com._ensure_float64(values)), dtype=np.float64)
elif np.issubdtype(values.dtype, np.datetime64):
table = lib.Int64HashTable(len(values))
uniques = np.array(table.unique(com._ensure_int64(values)), dtype=np.int64)
uniques = uniques.view("M8[ns]")
elif np.issubdtype(values.dtype, np.integer):
table = lib.Int64HashTable(len(values))
uniques = np.array(table.unique(com._ensure_int64(values)), dtype=np.int64)
else:
table = lib.PyObjectHashTable(len(values))
uniques = table.unique(com._ensure_object(values))
uniques = lib.list_to_object_array(uniques)
return uniques
|
def unique1d(values):
"""
Hash table-based unique
"""
if issubclass(values.dtype.type, np.floating):
if values.dtype != np.float64:
values = values.astype(np.float64)
table = lib.Float64HashTable(len(values))
uniques = np.array(table.unique(values), dtype=np.float64)
elif issubclass(values.dtype.type, np.integer):
if values.dtype != np.int64:
values = values.astype(np.int64)
table = lib.Int64HashTable(len(values))
uniques = np.array(table.unique(values), dtype=np.int64)
else:
if not values.dtype == np.object_:
values = values.astype(np.object_)
table = lib.PyObjectHashTable(len(values))
uniques = lib.list_to_object_array(table.unique(values))
return uniques
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def swapaxes(self, axis1="major", axis2="minor", copy=True):
"""
Interchange axes and swap values axes appropriately
Returns
-------
y : Panel (new object)
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
raise ValueError("Cannot specify the same axis")
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(3))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes)
|
def swapaxes(self, axis1="major", axis2="minor"):
"""
Interchange axes and swap values axes appropriately
Returns
-------
y : Panel (new object)
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
raise ValueError("Cannot specify the same axis")
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(3))
new_values = self.values.swapaxes(i, j).copy()
return self._constructor(new_values, *new_axes)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _extract_axis(data, axis=0, intersect=False):
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, DataFrame):
have_frames = True
indexes.append(v._get_axis(axis))
else:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError("ndarrays must match shape on axis %d" % axis)
if have_frames:
assert lengths[0] == len(index)
else:
index = Index(np.arange(lengths[0]))
return _ensure_index(index)
|
def _extract_axis(data, axis=0, intersect=False):
from pandas.core.index import _union_indexes
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, DataFrame):
have_frames = True
indexes.append(v._get_axis(axis))
else:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError("ndarrays must match shape on axis %d" % axis)
if have_frames:
assert lengths[0] == len(index)
else:
index = Index(np.arange(lengths[0]))
return _ensure_index(index)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
new_values = np.empty((length, result_width), dtype=values.dtype)
new_mask = np.zeros((length, result_width), dtype=bool)
if issubclass(values.dtype.type, np.integer):
new_values = new_values.astype(float)
new_values.fill(np.nan)
# is there a simpler / faster way of doing this?
for i in xrange(values.shape[1]):
chunk = new_values[:, i * width : (i + 1) * width]
mask_chunk = new_mask[:, i * width : (i + 1) * width]
chunk.flat[self.mask] = self.sorted_values[:, i]
mask_chunk.flat[self.mask] = True
new_values = new_values.take(self.unique_groups, axis=0)
return new_values, new_mask
|
def get_new_values(self):
return self._reshape_values(self.values)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def unstack(obj, level):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level)
return unstacker.get_result()
|
def unstack(obj, level):
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level)
return unstacker.get_result()
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _get_val_at(self, loc):
n = len(self)
if loc < 0:
loc += n
if loc >= len(self) or loc < 0:
raise Exception("Out of bounds access")
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return lib.get_value_at(self, sp_loc)
|
def _get_val_at(self, loc):
n = len(self)
if loc < 0:
loc += n
if loc >= len(self) or loc < 0:
raise Exception("Out of bounds access")
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return _gin.get_value_at(self, sp_loc)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def __getitem__(self, item):
"""
Retrieve column or slice from DataFrame
"""
try:
# unsure about how kludgy this is
s = self._series[item]
s.name = item
return s
except (TypeError, KeyError):
if isinstance(item, slice):
date_rng = self.index[item]
return self.reindex(date_rng)
elif isinstance(item, np.ndarray):
if len(item) != len(self.index):
raise Exception(
"Item wrong length %d instead of %d!" % (len(item), len(self.index))
)
newIndex = self.index[item]
return self.reindex(newIndex)
else: # pragma: no cover
raise
|
def __getitem__(self, item):
"""
Retrieve column or slice from DataFrame
"""
try:
# unsure about how kludgy this is
s = self._series[item]
s.name = item
return s
except (TypeError, KeyError):
if isinstance(item, slice):
dateRange = self.index[item]
return self.reindex(dateRange)
elif isinstance(item, np.ndarray):
if len(item) != len(self.index):
raise Exception(
"Item wrong length %d instead of %d!" % (len(item), len(self.index))
)
newIndex = self.index[item]
return self.reindex(newIndex)
else: # pragma: no cover
raise
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join="outer", level=level, copy=False)
new_index, new_columns = this.index, this.columns
if level is not None:
raise NotImplementedError
if self.empty and other.empty:
return SparseDataFrame(index=new_index)
new_data = {}
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
return self._constructor(data=new_data, index=new_index, columns=new_columns)
|
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join="outer", level=level, copy=False)
new_index, new_columns = this.index, this.columns
if level is not None:
raise NotImplementedError
if not self and not other:
return SparseDataFrame(index=new_index)
new_data = {}
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
return self._constructor(data=new_data, index=new_index, columns=new_columns)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _reindex_index(self, index, method, copy, level, fill_value=np.nan, limit=None):
if level is not None:
raise Exception("Reindex by level not supported for sparse")
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return SparseDataFrame(index=index, columns=self.columns)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = com._ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
values = series.values
new = values.take(indexer)
if need_mask:
np.putmask(new, mask, fill_value)
new_series[col] = new
return SparseDataFrame(
new_series,
index=index,
columns=self.columns,
default_fill_value=self.default_fill_value,
)
|
def _reindex_index(self, index, method, copy, level, fill_value=np.nan):
if level is not None:
raise Exception("Reindex by level not supported for sparse")
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return SparseDataFrame(index=index, columns=self.columns)
indexer = self.index.get_indexer(index, method)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
values = series.values
new = values.take(indexer)
if need_mask:
np.putmask(new, mask, fill_value)
new_series[col] = new
return SparseDataFrame(
new_series,
index=index,
columns=self.columns,
default_fill_value=self.default_fill_value,
)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _reindex_columns(self, columns, copy, level, fill_value, limit=None):
if level is not None:
raise Exception("Reindex by level not supported for sparse")
if com.notnull(fill_value):
raise NotImplementedError
if limit:
raise NotImplementedError
# TODO: fill value handling
sdict = dict((k, v) for k, v in self.iteritems() if k in columns)
return SparseDataFrame(
sdict,
index=self.index,
columns=columns,
default_fill_value=self.default_fill_value,
)
|
def _reindex_columns(self, columns, copy, level, fill_value):
if level is not None:
raise Exception("Reindex by level not supported for sparse")
if com.notnull(fill_value):
raise NotImplementedError
# TODO: fill value handling
sdict = dict((k, v) for k, v in self.iteritems() if k in columns)
return SparseDataFrame(
sdict,
index=self.index,
columns=columns,
default_fill_value=self.default_fill_value,
)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def take(self, indices, axis=0):
"""
Analogous to ndarray.take, return SparseDataFrame corresponding to
requested indices along an axis
Parameters
----------
indices : list / array of ints
axis : {0, 1}
Returns
-------
taken : SparseDataFrame
"""
indices = com._ensure_platform_int(indices)
new_values = self.values.take(indices, axis=axis)
if axis == 0:
new_columns = self.columns
new_index = self.index.take(indices)
else:
new_columns = self.columns.take(indices)
new_index = self.index
return self._constructor(new_values, index=new_index, columns=new_columns)
|
def take(self, indices, axis=0):
"""
Analogous to ndarray.take, return SparseDataFrame corresponding to
requested indices along an axis
Parameters
----------
indices : list / array of ints
axis : {0, 1}
Returns
-------
taken : SparseDataFrame
"""
new_values = self.values.take(indices, axis=axis)
if axis == 0:
new_columns = self.columns
new_index = self.index.take(indices)
else:
new_columns = self.columns.take(indices)
new_index = self.index
return self._constructor(new_values, index=new_index, columns=new_columns)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def shift(self, periods, freq=None, **kwds):
"""
Analogous to DataFrame.shift
"""
from pandas.core.series import _resolve_offset
offset = _resolve_offset(freq, kwds)
new_series = {}
if offset is None:
new_index = self.index
for col, s in self.iteritems():
new_series[col] = s.shift(periods)
else:
new_index = self.index.shift(periods, offset)
for col, s in self.iteritems():
new_series[col] = SparseSeries(
s.sp_values,
index=new_index,
sparse_index=s.sp_index,
fill_value=s.fill_value,
)
return SparseDataFrame(
new_series,
index=new_index,
columns=self.columns,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind,
)
|
def shift(self, periods, offset=None, timeRule=None):
"""
Analogous to DataFrame.shift
"""
if timeRule is not None and offset is None:
offset = datetools.getOffset(timeRule)
new_series = {}
if offset is None:
new_index = self.index
for col, s in self.iteritems():
new_series[col] = s.shift(periods)
else:
new_index = self.index.shift(periods, offset)
for col, s in self.iteritems():
new_series[col] = SparseSeries(
s.sp_values,
index=new_index,
sparse_index=s.sp_index,
fill_value=s.fill_value,
)
return SparseDataFrame(
new_series,
index=new_index,
columns=self.columns,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind,
)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def fillna(self, value=None, method="pad", inplace=False, limit=None):
new_series = {}
for k, v in self.iterkv():
new_series[k] = v.fillna(value=value, method=method, limit=limit)
if inplace:
self._series = new_series
return self
else:
return self._constructor(new_series, index=self.index, columns=self.columns)
|
def fillna(self, value=None, method="pad", inplace=False):
new_series = {}
for k, v in self.iterkv():
new_series[k] = v.fillna(value=value, method=method)
if inplace:
self._series = new_series
return self
else:
return self._constructor(new_series, index=self.index, columns=self.columns)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def __getstate__(self):
# pickling
return (
self._frames,
com._pickle_array(self.items),
com._pickle_array(self.major_axis),
com._pickle_array(self.minor_axis),
self.default_fill_value,
self.default_kind,
)
|
def __getstate__(self):
# pickling
return (
self._frames,
_pickle_array(self.items),
_pickle_array(self.major_axis),
_pickle_array(self.minor_axis),
self.default_fill_value,
self.default_kind,
)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def __setstate__(self, state):
frames, items, major, minor, fv, kind = state
self.default_fill_value = fv
self.default_kind = kind
self._items = _ensure_index(com._unpickle_array(items))
self._major_axis = _ensure_index(com._unpickle_array(major))
self._minor_axis = _ensure_index(com._unpickle_array(minor))
self._frames = frames
|
def __setstate__(self, state):
frames, items, major, minor, fv, kind = state
self.default_fill_value = fv
self.default_kind = kind
self._items = _ensure_index(_unpickle_array(items))
self._major_axis = _ensure_index(_unpickle_array(major))
self._minor_axis = _ensure_index(_unpickle_array(minor))
self._frames = frames
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def reindex(
self,
major=None,
items=None,
minor=None,
major_axis=None,
minor_axis=None,
copy=False,
):
"""
Conform / reshape panel axis labels to new input labels
Parameters
----------
major : array-like, default None
items : array-like, default None
minor : array-like, default None
copy : boolean, default False
Copy underlying SparseDataFrame objects
Returns
-------
reindexed : SparsePanel
"""
major = com._mut_exclusive(major, major_axis)
minor = com._mut_exclusive(minor, minor_axis)
if com._all_none(items, major, minor):
raise ValueError("Must specify at least one axis")
major = self.major_axis if major is None else major
minor = self.minor_axis if minor is None else minor
if items is not None:
new_frames = {}
for item in items:
if item in self._frames:
new_frames[item] = self._frames[item]
else:
raise Exception("Reindexing with new items not yet supported")
else:
new_frames = self._frames
if copy:
new_frames = dict((k, v.copy()) for k, v in new_frames.iteritems())
return SparsePanel(
new_frames,
items=items,
major_axis=major,
minor_axis=minor,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind,
)
|
def reindex(
self,
major=None,
items=None,
minor=None,
major_axis=None,
minor_axis=None,
copy=False,
):
"""
Conform / reshape panel axis labels to new input labels
Parameters
----------
major : array-like, default None
items : array-like, default None
minor : array-like, default None
copy : boolean, default False
Copy underlying SparseDataFrame objects
Returns
-------
reindexed : SparsePanel
"""
major = _mut_exclusive(major, major_axis)
minor = _mut_exclusive(minor, minor_axis)
if None == major == items == minor:
raise ValueError("Must specify at least one axis")
major = self.major_axis if major is None else major
minor = self.minor_axis if minor is None else minor
if items is not None:
new_frames = {}
for item in items:
if item in self._frames:
new_frames[item] = self._frames[item]
else:
raise Exception("Reindexing with new items not yet supported")
else:
new_frames = self._frames
if copy:
new_frames = dict((k, v.copy()) for k, v in new_frames.iteritems())
return SparsePanel(
new_frames,
items=items,
major_axis=major,
minor_axis=minor,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind,
)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def reindex(self, index=None, method=None, copy=True, limit=None):
"""
Conform SparseSeries to new Index
See Series.reindex docstring for general behavior
Returns
-------
reindexed : SparseSeries
"""
new_index = _ensure_index(index)
if self.index.equals(new_index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
# FIXME: inelegant / slow
values = np.empty(len(new_index), dtype=np.float64)
values.fill(nan)
return SparseSeries(values, index=new_index, fill_value=self.fill_value)
new_index, fill_vec = self.index.reindex(index, method=method, limit=limit)
new_values = common.take_1d(self.values, fill_vec)
return SparseSeries(
new_values, index=new_index, fill_value=self.fill_value, name=self.name
)
|
def reindex(self, index=None, method=None, copy=True):
"""
Conform SparseSeries to new Index
See Series.reindex docstring for general behavior
Returns
-------
reindexed : SparseSeries
"""
new_index = _ensure_index(index)
if self.index.equals(new_index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
# FIXME: inelegant / slow
values = np.empty(len(new_index), dtype=np.float64)
values.fill(nan)
return SparseSeries(values, index=new_index, fill_value=self.fill_value)
new_index, fill_vec = self.index.reindex(index, method=method)
new_values = common.take_1d(self.values, fill_vec)
return SparseSeries(
new_values, index=new_index, fill_value=self.fill_value, name=self.name
)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def fillna(self, value=None, method="pad", inplace=False, limit=None):
dense = self.to_dense()
filled = dense.fillna(value=value, method=method, limit=limit)
result = filled.to_sparse(kind=self.kind, fill_value=self.fill_value)
if inplace:
self.sp_values[:] = result.values
return self
else:
return result
|
def fillna(self, value=None, method="pad", inplace=False):
dense = self.to_dense()
filled = dense.fillna(value=value, method=method)
result = filled.to_sparse(kind=self.kind, fill_value=self.fill_value)
if inplace:
self.sp_values[:] = result.values
return self
else:
return result
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def shift(self, periods, freq=None, **kwds):
"""
Analogous to Series.shift
"""
from pandas.core.series import _resolve_offset
offset = _resolve_offset(freq, kwds)
# no special handling of fill values yet
if not isnull(self.fill_value):
dense_shifted = self.to_dense().shift(periods, freq=freq, **kwds)
return dense_shifted.to_sparse(fill_value=self.fill_value, kind=self.kind)
if periods == 0:
return self.copy()
if offset is not None:
return SparseSeries(
self.sp_values,
sparse_index=self.sp_index,
index=self.index.shift(periods, offset),
fill_value=self.fill_value,
)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = IntIndex(len(self), new_indices)
if isinstance(self.sp_index, BlockIndex):
new_sp_index = new_sp_index.to_block_index()
return SparseSeries(
self.sp_values[start:end].copy(),
index=self.index,
sparse_index=new_sp_index,
fill_value=self.fill_value,
)
|
def shift(self, periods, offset=None, timeRule=None):
"""
Analogous to Series.shift
"""
# no special handling of fill values yet
if not isnull(self.fill_value):
dense_shifted = self.to_dense().shift(periods, offset=offset, timeRule=timeRule)
return dense_shifted.to_sparse(fill_value=self.fill_value, kind=self.kind)
if periods == 0:
return self.copy()
if timeRule is not None and offset is None:
offset = datetools.getOffset(timeRule)
if offset is not None:
return SparseSeries(
self.sp_values,
sparse_index=self.sp_index,
index=self.index.shift(periods, offset),
fill_value=self.fill_value,
)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = IntIndex(len(self), new_indices)
if isinstance(self.sp_index, BlockIndex):
new_sp_index = new_sp_index.to_block_index()
return SparseSeries(
self.sp_values[start:end].copy(),
index=self.index,
sparse_index=new_sp_index,
fill_value=self.fill_value,
)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def generate_take_cython_file(path="generated.pyx"):
with open(path, "w") as f:
print >> f, header
print >> f, generate_ensure_dtypes()
for template in templates_1d:
print >> f, generate_from_template(template)
for template in templates_2d:
print >> f, generate_from_template(template, ndim=2)
# for template in templates_1d_datetime:
# print >> f, generate_from_template_datetime(template)
# for template in templates_2d_datetime:
# print >> f, generate_from_template_datetime(template, ndim=2)
for template in nobool_1d_templates:
print >> f, generate_from_template(template, exclude=["bool"])
|
def generate_take_cython_file(path="generated.pyx"):
with open(path, "w") as f:
for template in templates_1d:
print >> f, generate_from_template(template)
for template in templates_2d:
print >> f, generate_from_template(template, ndim=2)
for template in nobool_1d_templates:
print >> f, generate_from_template(template, exclude=["bool"])
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def quantileTS(frame, percentile):
"""
Return score at percentile for each point in time (cross-section)
Parameters
----------
frame: DataFrame
percentile: int
nth percentile
Returns
-------
Series (or TimeSeries)
"""
def func(x):
x = np.asarray(x.valid())
if x.any():
return scoreatpercentile(x, percentile)
else:
return NaN
return frame.apply(func, axis=1)
|
def quantileTS(frame, percentile):
"""
Return score at percentile for each point in time (cross-section)
Parameters
----------
frame: DataFrame
percentile: int
nth percentile
See also
--------
scipy.stats.scoreatpercentile
Returns
-------
Series (or TimeSeries)
"""
from scipy.stats import scoreatpercentile
def func(x):
x = np.asarray(x.valid())
if x.any():
return scoreatpercentile(x, percentile)
else:
return NaN
return frame.apply(func, axis=1)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def percentileRank(frame, column=None, kind="mean"):
"""
Return score at percentile for each point in time (cross-section)
Parameters
----------
frame: DataFrame
column: string or Series, optional
Column name or specific Series to compute percentiles for.
If not provided, percentiles are computed for all values at each
point in time. Note that this can take a LONG time.
kind: {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
TimeSeries or DataFrame, depending on input
"""
from pandas.compat.scipy import percentileofscore
fun = lambda xs, score: percentileofscore(remove_na(xs), score, kind=kind)
results = {}
framet = frame.T
if column is not None:
if isinstance(column, Series):
for date, xs in frame.T.iteritems():
results[date] = fun(xs, column.get(date, NaN))
else:
for date, xs in frame.T.iteritems():
results[date] = fun(xs, xs[column])
results = Series(results)
else:
for column in frame.columns:
for date, xs in framet.iteritems():
results.setdefault(date, {})[column] = fun(xs, xs[column])
results = DataFrame(results).T
return results
|
def percentileRank(frame, column=None, kind="mean"):
"""
Return score at percentile for each point in time (cross-section)
Parameters
----------
frame: DataFrame
column: string or Series, optional
Column name or specific Series to compute percentiles for.
If not provided, percentiles are computed for all values at each
point in time. Note that this can take a LONG time.
kind: {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
See also
--------
scipy.stats.percentileofscore
Returns
-------
TimeSeries or DataFrame, depending on input
"""
from scipy.stats import percentileofscore
fun = lambda xs, score: percentileofscore(remove_na(xs), score, kind=kind)
results = {}
framet = frame.T
if column is not None:
if isinstance(column, Series):
for date, xs in frame.T.iteritems():
results[date] = fun(xs, column.get(date, NaN))
else:
for date, xs in frame.T.iteritems():
results[date] = fun(xs, xs[column])
results = Series(results)
else:
for column in frame.columns:
for date, xs in framet.iteritems():
results.setdefault(date, {})[column] = fun(xs, xs[column])
results = DataFrame(results).T
return results
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def rolling_count(arg, window, freq=None, time_rule=None):
"""
Rolling count of number of non-NaN observations inside provided window.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
Returns
-------
rolling_count : type of caller
"""
arg = _conv_timerule(arg, freq, time_rule)
window = min(window, len(arg))
return_hook, values = _process_data_structure(arg, kill_inf=False)
converted = np.isfinite(values).astype(float)
result = rolling_sum(converted, window, min_periods=1, time_rule=time_rule)
# putmask here?
result[np.isnan(result)] = 0
return return_hook(result)
|
def rolling_count(arg, window, time_rule=None):
"""
Rolling count of number of non-NaN observations inside provided window.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
Returns
-------
rolling_count : type of caller
"""
arg = _conv_timerule(arg, time_rule)
window = min(window, len(arg))
return_hook, values = _process_data_structure(arg, kill_inf=False)
converted = np.isfinite(values).astype(float)
result = rolling_sum(converted, window, min_periods=1, time_rule=time_rule)
# putmask here?
result[np.isnan(result)] = 0
return return_hook(result)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _rolling_moment(arg, window, func, minp, axis=0, freq=None, time_rule=None):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
func : Cython function to compute rolling statistic on raw series
minp : int
Minimum number of observations required to have a value
axis : int, default 0
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
Returns
-------
y : type of input
"""
arg = _conv_timerule(arg, freq, time_rule)
calc = lambda x: func(x, window, minp=minp)
return_hook, values = _process_data_structure(arg)
# actually calculate the moment. Faster way to do this?
result = np.apply_along_axis(calc, axis, values)
return return_hook(result)
|
def _rolling_moment(arg, window, func, minp, axis=0, time_rule=None):
"""
Rolling statistical measure using supplied function. Designed to be
used with passed-in Cython array-based functions.
Parameters
----------
arg : DataFrame or numpy ndarray-like
window : Number of observations used for calculating statistic
func : Cython function to compute rolling statistic on raw series
minp : int
Minimum number of observations required to have a value
axis : int, default 0
time_rule : string or DateOffset
Time rule to conform to before computing result
Returns
-------
y : type of input
"""
arg = _conv_timerule(arg, time_rule)
calc = lambda x: func(x, window, minp=minp)
return_hook, values = _process_data_structure(arg)
# actually calculate the moment. Faster way to do this?
result = np.apply_along_axis(calc, axis, values)
return return_hook(result)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def ewma(arg, com=None, span=None, min_periods=0, freq=None, time_rule=None):
com = _get_center_of_mass(com, span)
arg = _conv_timerule(arg, freq, time_rule)
def _ewma(v):
result = _tseries.ewma(v, com)
first_index = _first_valid_index(v)
result[first_index : first_index + min_periods] = NaN
return result
return_hook, values = _process_data_structure(arg)
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
|
def ewma(arg, com=None, span=None, min_periods=0, time_rule=None):
com = _get_center_of_mass(com, span)
arg = _conv_timerule(arg, time_rule)
def _ewma(v):
result = _tseries.ewma(v, com)
first_index = _first_valid_index(v)
result[first_index : first_index + min_periods] = NaN
return result
return_hook, values = _process_data_structure(arg)
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def ewmvar(
arg, com=None, span=None, min_periods=0, bias=False, freq=None, time_rule=None
):
com = _get_center_of_mass(com, span)
arg = _conv_timerule(arg, freq, time_rule)
moment2nd = ewma(arg * arg, com=com, min_periods=min_periods)
moment1st = ewma(arg, com=com, min_periods=min_periods)
result = moment2nd - moment1st**2
if not bias:
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
|
def ewmvar(arg, com=None, span=None, min_periods=0, bias=False, time_rule=None):
com = _get_center_of_mass(com, span)
arg = _conv_timerule(arg, time_rule)
moment2nd = ewma(arg * arg, com=com, min_periods=min_periods)
moment1st = ewma(arg, com=com, min_periods=min_periods)
result = moment2nd - moment1st**2
if not bias:
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def ewmcov(
arg1,
arg2,
com=None,
span=None,
min_periods=0,
bias=False,
freq=None,
time_rule=None,
):
X, Y = _prep_binary(arg1, arg2)
X = _conv_timerule(X, freq, time_rule)
Y = _conv_timerule(Y, freq, time_rule)
mean = lambda x: ewma(x, com=com, span=span, min_periods=min_periods)
result = mean(X * Y) - mean(X) * mean(Y)
com = _get_center_of_mass(com, span)
if not bias:
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
|
def ewmcov(arg1, arg2, com=None, span=None, min_periods=0, bias=False, time_rule=None):
X, Y = _prep_binary(arg1, arg2)
X = _conv_timerule(X, time_rule)
Y = _conv_timerule(Y, time_rule)
mean = lambda x: ewma(x, com=com, span=span, min_periods=min_periods)
result = mean(X * Y) - mean(X) * mean(Y)
com = _get_center_of_mass(com, span)
if not bias:
result *= (1.0 + 2.0 * com) / (2.0 * com)
return result
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def ewmcorr(arg1, arg2, com=None, span=None, min_periods=0, freq=None, time_rule=None):
X, Y = _prep_binary(arg1, arg2)
X = _conv_timerule(X, freq, time_rule)
Y = _conv_timerule(Y, freq, time_rule)
mean = lambda x: ewma(x, com=com, span=span, min_periods=min_periods)
var = lambda x: ewmvar(x, com=com, span=span, min_periods=min_periods, bias=True)
return (mean(X * Y) - mean(X) * mean(Y)) / np.sqrt(var(X) * var(Y))
|
def ewmcorr(arg1, arg2, com=None, span=None, min_periods=0, time_rule=None):
X, Y = _prep_binary(arg1, arg2)
X = _conv_timerule(X, time_rule)
Y = _conv_timerule(Y, time_rule)
mean = lambda x: ewma(x, com=com, span=span, min_periods=min_periods)
var = lambda x: ewmvar(x, com=com, span=span, min_periods=min_periods, bias=True)
return (mean(X * Y) - mean(X) * mean(Y)) / np.sqrt(var(X) * var(Y))
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _conv_timerule(arg, freq, time_rule):
if time_rule is not None:
import warnings
warnings.warn(
"time_rule argument is deprecated, replace with freq", FutureWarning
)
freq = time_rule
types = (DataFrame, Series)
if freq is not None and isinstance(arg, types):
# Conform to whatever frequency needed.
arg = arg.resample(freq)
return arg
|
def _conv_timerule(arg, time_rule):
types = (DataFrame, Series)
if time_rule is not None and isinstance(arg, types):
# Conform to whatever frequency needed.
arg = arg.asfreq(time_rule)
return arg
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _rolling_func(func, desc, check_minp=_use_window):
@Substitution(desc, _unary_arg, _type_of_input)
@Appender(_doc_template)
@wraps(func)
def f(arg, window, min_periods=None, freq=None, time_rule=None):
def call_cython(arg, window, minp):
minp = check_minp(minp, window)
return func(arg, window, minp)
return _rolling_moment(
arg, window, call_cython, min_periods, freq=freq, time_rule=time_rule
)
return f
|
def _rolling_func(func, desc, check_minp=_use_window):
@Substitution(desc, _unary_arg, _type_of_input)
@Appender(_doc_template)
@wraps(func)
def f(arg, window, min_periods=None, time_rule=None):
def call_cython(arg, window, minp):
minp = check_minp(minp, window)
return func(arg, window, minp)
return _rolling_moment(
arg, window, call_cython, min_periods, time_rule=time_rule
)
return f
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def f(arg, window, min_periods=None, freq=None, time_rule=None):
def call_cython(arg, window, minp):
minp = check_minp(minp, window)
return func(arg, window, minp)
return _rolling_moment(
arg, window, call_cython, min_periods, freq=freq, time_rule=time_rule
)
|
def f(arg, window, min_periods=None, time_rule=None):
def call_cython(arg, window, minp):
minp = check_minp(minp, window)
return func(arg, window, minp)
return _rolling_moment(arg, window, call_cython, min_periods, time_rule=time_rule)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def rolling_quantile(
arg, window, quantile, min_periods=None, freq=None, time_rule=None
):
"""Moving quantile
Parameters
----------
arg : Series, DataFrame
window : Number of observations used for calculating statistic
quantile : 0 <= quantile <= 1
min_periods : int
Minimum number of observations in window required to have a value
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
Returns
-------
y : type of input argument
"""
def call_cython(arg, window, minp):
minp = _use_window(minp, window)
return _tseries.roll_quantile(arg, window, minp, quantile)
return _rolling_moment(
arg, window, call_cython, min_periods, freq=freq, time_rule=time_rule
)
|
def rolling_quantile(arg, window, quantile, min_periods=None, time_rule=None):
"""Moving quantile
Parameters
----------
arg : Series, DataFrame
window : Number of observations used for calculating statistic
quantile : 0 <= quantile <= 1
min_periods : int
Minimum number of observations in window required to have a value
time_rule : {None, 'WEEKDAY', 'EOM', 'W@MON', ...}, default=None
Name of time rule to conform to before computing statistic
Returns
-------
y : type of input argument
"""
def call_cython(arg, window, minp):
minp = _use_window(minp, window)
return _tseries.roll_quantile(arg, window, minp, quantile)
return _rolling_moment(arg, window, call_cython, min_periods, time_rule=time_rule)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def rolling_apply(arg, window, func, min_periods=None, freq=None, time_rule=None):
"""Generic moving function application
Parameters
----------
arg : Series, DataFrame
window : Number of observations used for calculating statistic
func : function
Must produce a single value from an ndarray input
min_periods : int
Minimum number of observations in window required to have a value
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
Returns
-------
y : type of input argument
"""
def call_cython(arg, window, minp):
minp = _use_window(minp, window)
return _tseries.roll_generic(arg, window, minp, func)
return _rolling_moment(
arg, window, call_cython, min_periods, freq=freq, time_rule=time_rule
)
|
def rolling_apply(arg, window, func, min_periods=None, time_rule=None):
"""Generic moving function application
Parameters
----------
arg : Series, DataFrame
window : Number of observations used for calculating statistic
func : function
Must produce a single value from an ndarray input
min_periods : int
Minimum number of observations in window required to have a value
time_rule : {None, 'WEEKDAY', 'EOM', 'W@MON', ...}, default=None
Name of time rule to conform to before computing statistic
Returns
-------
y : type of input argument
"""
def call_cython(arg, window, minp):
minp = _use_window(minp, window)
return _tseries.roll_generic(arg, window, minp, func)
return _rolling_moment(arg, window, call_cython, min_periods, time_rule=time_rule)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def pivot_table(
data,
values=None,
rows=None,
cols=None,
aggfunc="mean",
fill_value=None,
margins=False,
):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in the
pivot table will be stored in MultiIndex objects (hierarchical indexes) on
the index and columns of the result DataFrame
Parameters
----------
data : DataFrame
values : column to aggregate, optional
rows : list of column names or arrays to group on
Keys to group on the x-axis of the pivot table
cols : list of column names or arrays to group on
Keys to group on the y-axis of the pivot table
aggfunc : function, default numpy.mean, or list of functions
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names (inferred
from the function objects themselves)
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
Examples
--------
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', rows=['A', 'B'],
... cols=['C'], aggfunc=np.sum)
>>> table
small large
foo one 1 4
two 6 NaN
bar one 5 4
two 6 7
Returns
-------
table : DataFrame
"""
rows = _convert_by(rows)
cols = _convert_by(cols)
if isinstance(aggfunc, list):
pieces = []
keys = []
for func in aggfunc:
table = pivot_table(
data,
values=values,
rows=rows,
cols=cols,
fill_value=fill_value,
aggfunc=func,
margins=margins,
)
pieces.append(table)
keys.append(func.__name__)
return concat(pieces, keys=keys, axis=1)
keys = rows + cols
values_passed = values is not None
if values_passed:
if isinstance(values, (list, tuple)):
values_multi = True
else:
values_multi = False
values = [values]
else:
values = list(data.columns.drop(keys))
if values_passed:
to_filter = []
for x in keys + values:
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
to_unstack = [agged.index.names[i] for i in range(len(rows), len(keys))]
table = agged.unstack(to_unstack)
if isinstance(table, DataFrame):
if isinstance(table.columns, MultiIndex):
table = table.sortlevel(axis=1)
else:
table = table.sort_index(axis=1)
if fill_value is not None:
table = table.fillna(value=fill_value)
if margins:
table = _add_margins(table, data, values, rows=rows, cols=cols, aggfunc=aggfunc)
# discard the top level
if values_passed and not values_multi:
table = table[values[0]]
return table
|
def pivot_table(
data,
values=None,
rows=None,
cols=None,
aggfunc="mean",
fill_value=None,
margins=False,
):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in the
pivot table will be stored in MultiIndex objects (hierarchical indexes) on
the index and columns of the result DataFrame
Parameters
----------
data : DataFrame
values : column to aggregate, optional
rows : list of column names or arrays to group on
Keys to group on the x-axis of the pivot table
cols : list of column names or arrays to group on
Keys to group on the y-axis of the pivot table
aggfunc : function, default numpy.mean, or list of functions
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names (inferred
from the function objects themselves)
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
Examples
--------
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', rows=['A', 'B'],
... cols=['C'], aggfunc=np.sum)
>>> table
small large
foo one 1 4
two 6 NaN
bar one 5 4
two 6 7
Returns
-------
table : DataFrame
"""
rows = _convert_by(rows)
cols = _convert_by(cols)
if isinstance(aggfunc, list):
pieces = []
keys = []
for func in aggfunc:
table = pivot_table(
data,
values=values,
rows=rows,
cols=cols,
fill_value=fill_value,
aggfunc=func,
margins=margins,
)
pieces.append(table)
keys.append(func.__name__)
return concat(pieces, keys=keys, axis=1)
keys = rows + cols
values_passed = values is not None
if values_passed:
if isinstance(values, (list, tuple)):
values_multi = True
else:
values_multi = False
values = [values]
else:
values = list(data.columns.drop(keys))
if values_passed:
to_filter = []
for x in keys + values:
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
table = agged
for i in range(len(cols)):
name = table.index.names[len(rows)]
table = table.unstack(name)
if fill_value is not None:
table = table.fillna(value=fill_value)
if margins:
table = _add_margins(table, data, values, rows=rows, cols=cols, aggfunc=aggfunc)
# discard the top level
if values_passed and not values_multi:
table = table[values[0]]
return table
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def scatter_matrix(
frame,
alpha=0.5,
figsize=None,
ax=None,
grid=False,
diagonal="hist",
marker=".",
**kwds,
):
"""
Draw a matrix of scatter plots.
Parameters
----------
alpha : amount of transparency applied
figsize : a tuple (width, height) in inches
ax : Matplotlib axis object
grid : setting this to True will show the grid
diagonal : pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plon in the diagonal
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
fig, axes = _subplots(nrows=n, ncols=n, figsize=figsize, ax=ax, squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
for i, a in zip(range(n), df.columns):
for j, b in zip(range(n), df.columns):
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == "hist":
axes[i, j].hist(values)
elif diagonal == "kde":
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
axes[i, j].plot(ind, gkde.evaluate(ind), **kwds)
else:
common = (mask[a] & mask[b]).values
axes[i, j].scatter(
df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds
)
axes[i, j].set_xlabel("")
axes[i, j].set_ylabel("")
axes[i, j].set_xticklabels([])
axes[i, j].set_yticklabels([])
ticks = df.index
is_datetype = ticks.inferred_type in ("datetime", "date", "datetime64")
if ticks.is_numeric() or is_datetype:
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
ticks = ticks._mpl_repr()
# setup labels
if i == 0 and j % 2 == 1:
axes[i, j].set_xlabel(b, visible=True)
# axes[i, j].xaxis.set_visible(True)
axes[i, j].set_xlabel(b)
axes[i, j].set_xticklabels(ticks)
axes[i, j].xaxis.set_ticks_position("top")
axes[i, j].xaxis.set_label_position("top")
if i == n - 1 and j % 2 == 0:
axes[i, j].set_xlabel(b, visible=True)
# axes[i, j].xaxis.set_visible(True)
axes[i, j].set_xlabel(b)
axes[i, j].set_xticklabels(ticks)
axes[i, j].xaxis.set_ticks_position("bottom")
axes[i, j].xaxis.set_label_position("bottom")
if j == 0 and i % 2 == 0:
axes[i, j].set_ylabel(a, visible=True)
# axes[i, j].yaxis.set_visible(True)
axes[i, j].set_ylabel(a)
axes[i, j].set_yticklabels(ticks)
axes[i, j].yaxis.set_ticks_position("left")
axes[i, j].yaxis.set_label_position("left")
if j == n - 1 and i % 2 == 1:
axes[i, j].set_ylabel(a, visible=True)
# axes[i, j].yaxis.set_visible(True)
axes[i, j].set_ylabel(a)
axes[i, j].set_yticklabels(ticks)
axes[i, j].yaxis.set_ticks_position("right")
axes[i, j].yaxis.set_label_position("right")
axes[i, j].grid(b=grid)
return axes
|
def scatter_matrix(frame, alpha=0.5, figsize=None, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
fig, axes = _subplots(nrows=n, ncols=n, figsize=figsize)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
for i, a in zip(range(n), df.columns):
for j, b in zip(range(n), df.columns):
axes[i, j].scatter(df[b], df[a], alpha=alpha, **kwds)
axes[i, j].yaxis.set_visible(False)
axes[i, j].xaxis.set_visible(False)
# setup labels
if i == 0 and j % 2 == 1:
axes[i, j].set_xlabel(b, visible=True)
axes[i, j].xaxis.set_visible(True)
axes[i, j].xaxis.set_ticks_position("top")
axes[i, j].xaxis.set_label_position("top")
if i == n - 1 and j % 2 == 0:
axes[i, j].set_xlabel(b, visible=True)
axes[i, j].xaxis.set_visible(True)
axes[i, j].xaxis.set_ticks_position("bottom")
axes[i, j].xaxis.set_label_position("bottom")
if j == 0 and i % 2 == 0:
axes[i, j].set_ylabel(a, visible=True)
axes[i, j].yaxis.set_visible(True)
axes[i, j].yaxis.set_ticks_position("left")
axes[i, j].yaxis.set_label_position("left")
if j == n - 1 and i % 2 == 1:
axes[i, j].set_ylabel(a, visible=True)
axes[i, j].yaxis.set_visible(True)
axes[i, j].yaxis.set_ticks_position("right")
axes[i, j].yaxis.set_label_position("right")
# ensure {x,y}lim off diagonal are the same as diagonal
for i in range(n):
for j in range(n):
if i != j:
axes[i, j].set_xlim(axes[j, j].get_xlim())
axes[i, j].set_ylim(axes[i, i].get_ylim())
return axes
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _iter_data(self):
from pandas.core.frame import DataFrame
if isinstance(self.data, (Series, np.ndarray)):
yield com._stringify(self.label), np.asarray(self.data)
elif isinstance(self.data, DataFrame):
df = self.data
if self.sort_columns:
columns = com._try_sort(df.columns)
else:
columns = df.columns
for col in columns:
empty = df[col].count() == 0
# is this right?
values = df[col].values if not empty else np.zeros(len(df))
col = com._stringify(col)
yield col, values
|
def _iter_data(self):
from pandas.core.frame import DataFrame
from pandas.core.series import Series
if isinstance(self.data, (Series, np.ndarray)):
yield com._stringify(self.label), np.asarray(self.data)
elif isinstance(self.data, DataFrame):
df = self.data
if self.sort_columns:
columns = com._try_sort(df.columns)
else:
columns = df.columns
for col in columns:
empty = df[col].count() == 0
# is this right?
values = df[col].values if not empty else np.zeros(len(df))
col = com._stringify(col)
yield col, values
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _adorn_subplots(self):
if self.subplots:
to_adorn = self.axes
else:
to_adorn = [self.ax]
# todo: sharex, sharey handling?
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.legend and not self.subplots:
self.ax.legend(loc="best", title=self.legend_title)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.ax.set_title(self.title)
if self._need_to_set_index:
xticklabels = [_stringify(key) for key in self.data.index]
for ax_ in self.axes:
# ax_.set_xticks(self.xticks)
ax_.set_xticklabels(xticklabels, rotation=self.rot)
|
def _adorn_subplots(self):
if self.subplots:
to_adorn = self.axes
else:
to_adorn = [self.ax]
# todo: sharex, sharey handling?
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.legend and not self.subplots:
self.ax.legend(loc="best")
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.ax.set_title(self.title)
if self._need_to_set_index:
xticklabels = [_stringify(key) for key in self.data.index]
for ax_ in self.axes:
# ax_.set_xticks(self.xticks)
ax_.set_xticklabels(xticklabels, rotation=self.rot)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _get_xticks(self):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date", "datetime64")
if self.use_index:
if index.is_numeric() or is_datetype:
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
else:
self._need_to_set_index = True
x = range(len(index))
else:
x = range(len(index))
return x
|
def _get_xticks(self):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date")
if self.use_index:
if index.is_numeric() or is_datetype:
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index.values
else:
self._need_to_set_index = True
x = range(len(index))
else:
x = range(len(index))
return x
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _make_plot(self):
# this is slightly deceptive
if self.use_index and self.has_ts_index:
data = self._maybe_convert_index(self.data)
self._make_ts_plot(data)
else:
x = self._get_xticks()
plotf = self._get_plot_function()
for i, (label, y) in enumerate(self._iter_data()):
if self.subplots:
ax = self.axes[i]
style = "k"
else:
style = "" # empty string ignored
ax = self.ax
if self.style:
style = self.style
plotf(ax, x, y, style, label=label, **self.kwds)
ax.grid(self.grid)
|
def _make_plot(self):
# this is slightly deceptive
x = self._get_xticks()
plotf = self._get_plot_function()
for i, (label, y) in enumerate(self._iter_data()):
if self.subplots:
ax = self.axes[i]
style = "k"
else:
style = "" # empty string ignored
ax = self.ax
if self.style:
style = self.style
plotf(ax, x, y, style, label=label, **self.kwds)
ax.grid(self.grid)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _post_plot_logic(self):
df = self.data
if self.legend:
if self.subplots:
for ax in self.axes:
ax.legend(loc="best")
else:
self.axes[0].legend(loc="best")
condition = (
not self.has_ts_index
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex)
)
for ax in self.axes:
if condition:
format_date_labels(ax)
|
def _post_plot_logic(self):
df = self.data
if self.subplots and self.legend:
self.axes[0].legend(loc="best")
condition = (
df.index.is_all_dates and not self.subplots or (self.subplots and self.sharex)
)
for ax in self.axes:
if condition:
format_date_labels(ax)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _make_plot(self):
colors = "brgyk"
rects = []
labels = []
ax = self.axes[0]
bar_f = self.bar_f
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data()):
kwds = self.kwds.copy()
if "color" not in kwds:
kwds["color"] = colors[i % len(colors)]
if self.subplots:
ax = self.axes[i]
rect = bar_f(ax, self.ax_pos, y, 0.5, start=pos_prior, linewidth=1, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
rect = bar_f(
ax, self.ax_pos, y, 0.5, start=start, label=label, linewidth=1, **kwds
)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
rect = bar_f(
ax,
self.ax_pos + i * 0.75 / K,
y,
0.75 / K,
start=pos_prior,
label=label,
**kwds,
)
rects.append(rect)
labels.append(label)
if self.legend and not self.subplots:
patches = [r[0] for r in rects]
# Legend to the right of the plot
# ax.legend(patches, labels, bbox_to_anchor=(1.05, 1),
# loc=2, borderaxespad=0.)
# self.fig.subplots_adjust(right=0.80)
ax.legend(patches, labels, loc="best", title=self.legend_title)
self.fig.subplots_adjust(top=0.8, wspace=0, hspace=0)
|
def _make_plot(self):
colors = "brgyk"
rects = []
labels = []
ax = self.axes[0]
bar_f = self.bar_f
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data()):
kwds = self.kwds.copy()
if "color" not in kwds:
kwds["color"] = colors[i % len(colors)]
if self.subplots:
ax = self.axes[i]
rect = bar_f(ax, self.ax_pos, y, 0.5, start=pos_prior, linewidth=1, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
rect = bar_f(
ax, self.ax_pos, y, 0.5, start=start, label=label, linewidth=1, **kwds
)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
rect = bar_f(
ax,
self.ax_pos + i * 0.75 / K,
y,
0.75 / K,
start=pos_prior,
label=label,
**kwds,
)
rects.append(rect)
labels.append(label)
if self.legend and not self.subplots:
patches = [r[0] for r in rects]
# Legend to the right of the plot
# ax.legend(patches, labels, bbox_to_anchor=(1.05, 1),
# loc=2, borderaxespad=0.)
# self.fig.subplots_adjust(right=0.80)
ax.legend(patches, labels, loc="best")
self.fig.subplots_adjust(top=0.8)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def plot_series(
series,
label=None,
kind="line",
use_index=True,
rot=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
ax=None,
style=None,
grid=True,
logy=False,
**kwds,
):
"""
Plot the input series with the index on the x-axis using matplotlib
Parameters
----------
label : label argument to provide to plot
kind : {'line', 'bar'}
rot : int, default 30
Rotation for tick labels
use_index : boolean, default True
Plot index as axis tick labels
ax : matplotlib axis object
If not passed, uses gca()
style : string, default matplotlib default
matplotlib line style to use
ax : matplotlib axis object
If not passed, uses gca()
kind : {'line', 'bar', 'barh'}
bar : vertical bar plot
barh : horizontal bar plot
logy : boolean, default False
For line plots, use log scaling on y axis
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks
kwds : keywords
Options to pass to matplotlib plotting method
Notes
-----
See matplotlib documentation online for more on this subject
"""
if kind == "line":
klass = LinePlot
elif kind in ("bar", "barh"):
klass = BarPlot
elif kind == "kde":
klass = KdePlot
if ax is None:
ax = _gca()
# is there harm in this?
if label is None:
label = series.name
plot_obj = klass(
series,
kind=kind,
rot=rot,
logy=logy,
ax=ax,
use_index=use_index,
style=style,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
legend=False,
grid=grid,
label=label,
**kwds,
)
plot_obj.generate()
plot_obj.draw()
return plot_obj.ax
|
def plot_series(
series,
label=None,
kind="line",
use_index=True,
rot=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
ax=None,
style=None,
grid=True,
logy=False,
**kwds,
):
"""
Plot the input series with the index on the x-axis using matplotlib
Parameters
----------
label : label argument to provide to plot
kind : {'line', 'bar'}
rot : int, default 30
Rotation for tick labels
use_index : boolean, default True
Plot index as axis tick labels
ax : matplotlib axis object
If not passed, uses gca()
style : string, default matplotlib default
matplotlib line style to use
ax : matplotlib axis object
If not passed, uses gca()
kind : {'line', 'bar', 'barh'}
bar : vertical bar plot
barh : horizontal bar plot
logy : boolean, default False
For line plots, use log scaling on y axis
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks
kwds : keywords
Options to pass to matplotlib plotting method
Notes
-----
See matplotlib documentation online for more on this subject
"""
if kind == "line":
klass = LinePlot
elif kind in ("bar", "barh"):
klass = BarPlot
if ax is None:
ax = _gca()
# is there harm in this?
if label is None:
label = series.name
plot_obj = klass(
series,
kind=kind,
rot=rot,
logy=logy,
ax=ax,
use_index=use_index,
style=style,
xticks=xticks,
yticks=yticks,
xlim=xlim,
ylim=ylim,
legend=False,
grid=grid,
label=label,
**kwds,
)
plot_obj.generate()
plot_obj.draw()
return plot_obj.ax
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def boxplot(
data, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None
):
"""
Make a box plot from DataFrame column optionally grouped b ysome columns or
other inputs
Parameters
----------
data : DataFrame or Series
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
fontsize : int or string
Returns
-------
ax : matplotlib.axes.AxesSubplot
"""
from pandas import Series, DataFrame
if isinstance(data, Series):
data = DataFrame({"x": data})
column = "x"
def plot_group(grouped, ax):
keys, values = zip(*grouped)
keys = [_stringify(x) for x in keys]
ax.boxplot(values)
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
if column == None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
if not isinstance(by, (list, tuple)):
by = [by]
fig, axes = _grouped_plot_by_column(
plot_group, data, columns=columns, by=by, grid=grid, figsize=figsize
)
# Return axes in multiplot case, maybe revisit later # 985
ret = axes
else:
if ax is None:
ax = _gca()
fig = ax.get_figure()
data = data._get_numeric_data()
if columns:
cols = columns
else:
cols = data.columns
keys = [_stringify(x) for x in cols]
# Return boxplot dict in single plot case
bp = ax.boxplot(list(data[cols].values.T))
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
ax.grid(grid)
ret = bp
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return ret
|
def boxplot(
data, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None
):
"""
Make a box plot from DataFrame column optionally grouped b ysome columns or
other inputs
Parameters
----------
data : DataFrame
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
fontsize : int or string
Returns
-------
ax : matplotlib.axes.AxesSubplot
"""
def plot_group(grouped, ax):
keys, values = zip(*grouped)
keys = [_stringify(x) for x in keys]
ax.boxplot(values)
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
if column == None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
if not isinstance(by, (list, tuple)):
by = [by]
fig, axes = _grouped_plot_by_column(
plot_group, data, columns=columns, by=by, grid=grid, figsize=figsize
)
# Return axes in multiplot case, maybe revisit later # 985
ret = axes
else:
if ax is None:
ax = _gca()
fig = ax.get_figure()
data = data._get_numeric_data()
if columns:
cols = columns
else:
cols = data.columns
keys = [_stringify(x) for x in cols]
# Return boxplot dict in single plot case
bp = ax.boxplot(list(data[cols].values.T))
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
ax.grid(grid)
ret = bp
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return ret
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False):
"""
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(str(y))
ax.set_xlabel(str(x))
ax.grid(grid)
return fig
|
def scatter_plot(data, x, y, by=None, ax=None, figsize=None):
"""
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(str(y))
ax.set_xlabel(str(x))
return fig
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals)
ax.grid(grid)
|
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def hist_frame(
data,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
**kwds,
):
"""
Draw Histogram the DataFrame's series using matplotlib / pylab.
Parameters
----------
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
kwds : other plotting keyword arguments
To be passed to hist function
"""
import matplotlib.pyplot as plt
n = len(data.columns)
rows, cols = 1, 1
while rows * cols < n:
if cols > rows:
rows += 1
else:
cols += 1
_, axes = _subplots(nrows=rows, ncols=cols, ax=ax, squeeze=False)
for i, col in enumerate(com._try_sort(data.columns)):
ax = axes[i / cols][i % cols]
ax.xaxis.set_visible(True)
ax.yaxis.set_visible(True)
ax.hist(data[col].dropna().values, **kwds)
ax.set_title(col)
ax.grid(grid)
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
for j in range(i + 1, rows * cols):
ax = axes[j / cols, j % cols]
ax.set_visible(False)
ax.get_figure().subplots_adjust(wspace=0.3, hspace=0.3)
return axes
|
def hist_frame(
data,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
**kwds,
):
"""
Draw Histogram the DataFrame's series using matplotlib / pylab.
Parameters
----------
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
kwds : other plotting keyword arguments
To be passed to hist function
"""
import matplotlib.pyplot as plt
n = len(data.columns)
k = 1
while k**2 < n:
k += 1
_, axes = _subplots(nrows=k, ncols=k, ax=ax)
for i, col in enumerate(com._try_sort(data.columns)):
ax = axes[i / k][i % k]
ax.hist(data[col].dropna().values, **kwds)
ax.set_title(col)
ax.grid(grid)
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def _subplots(
nrows=1,
ncols=1,
sharex=False,
sharey=False,
squeeze=True,
subplot_kw=None,
ax=None,
**fig_kw,
):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
nrows : int
Number of rows of the subplot grid. Defaults to 1.
ncols : int
Number of columns of the subplot grid. Defaults to 1.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharex : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing at all is done: the returned axis object is always
a 2-d array contaning Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
fig_kw : dict
Dict with keywords passed to the figure() call. Note that all keywords
not recognized above will be automatically included here.
ax : Matplotlib axis object, default None
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one supblot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
nplots = nrows * ncols
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw["sharex"] = ax0
if sharey:
subplot_kw["sharey"] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
axarr[i] = fig.add_subplot(nrows, ncols, i + 1, **subplot_kw)
if nplots > 1:
if sharex and nrows > 1:
for i, ax in enumerate(axarr):
if np.ceil(float(i + 1) / ncols) < nrows: # only last row
[label.set_visible(False) for label in ax.get_xticklabels()]
if sharey and ncols > 1:
for i, ax in enumerate(axarr):
if (i % ncols) != 0: # only first column
[label.set_visible(False) for label in ax.get_yticklabels()]
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
|
def _subplots(
nrows=1,
ncols=1,
sharex=False,
sharey=False,
squeeze=True,
subplot_kw=None,
ax=None,
**fig_kw,
):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
nrows : int
Number of rows of the subplot grid. Defaults to 1.
ncols : int
Number of columns of the subplot grid. Defaults to 1.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharex : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing at all is done: the returned axis object is always
a 2-d array contaning Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
fig_kw : dict
Dict with keywords passed to the figure() call. Note that all keywords
not recognized above will be automatically included here.
ax : Matplotlib axis object, default None
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one supblot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
nplots = nrows * ncols
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw["sharex"] = ax0
if sharey:
subplot_kw["sharey"] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
axarr[i] = fig.add_subplot(nrows, ncols, i + 1, **subplot_kw)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
return fig, axarr[0]
else:
return fig, axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
return fig, axarr.reshape(nrows, ncols)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
self._clean_exclude = [
"np_datetime.c",
"np_datetime_strings.c",
"period.c",
"ujson.c",
"objToJSON.c",
"JSONtoObj.c",
"ultrajsonenc.c",
"ultrajsondec.c",
]
for root, dirs, files in list(os.walk("pandas")):
for f in files:
if f in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in (".pyc", ".so", ".o", ".pyd", ".c"):
self._clean_me.append(pjoin(root, f))
for d in dirs:
if d == "__pycache__":
self._clean_trees.append(pjoin(root, d))
for d in ("build",):
if os.path.exists(d):
self._clean_trees.append(d)
|
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
for root, dirs, files in list(os.walk("pandas")):
for f in files:
if os.path.splitext(f)[-1] in (".pyc", ".so", ".o", ".pyd", ".c"):
self._clean_me.append(pjoin(root, f))
for d in dirs:
if d == "__pycache__":
self._clean_trees.append(pjoin(root, d))
for d in ("build",):
if os.path.exists(d):
self._clean_trees.append(d)
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def run_process():
runner = BenchmarkRunner(
benchmarks,
REPO_PATH,
REPO_URL,
BUILD,
DB_PATH,
TMP_DIR,
PREPARE,
always_clean=True,
run_option="eod",
start_date=START_DATE,
module_dependencies=dependencies,
)
runner.run()
|
def run_process():
runner = BenchmarkRunner(
benchmarks,
REPO_PATH,
REPO_URL,
BUILD,
DB_PATH,
TMP_DIR,
PREPARE,
run_option="eod",
start_date=START_DATE,
module_dependencies=dependencies,
)
runner.run()
|
https://github.com/pandas-dev/pandas/issues/1328
|
In [17]: date_range(datetime.datetime.today(), periods=10, freq='2h20m')
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/chang/Dropbox/git/pandas/<ipython-input-17-ff4e03382573> in <module>()
----> 1 date_range(datetime.datetime.today(), periods=10, freq='2h20m')
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in date_range(start, end, periods, freq, tz, normalize)
1209 """
1210 return DatetimeIndex(start=start, end=end, periods=periods,
-> 1211 freq=freq, tz=tz, normalize=normalize)
1212
1213
/home/chang/Dropbox/git/pandas/pandas/tseries/index.pyc in __new__(cls, data, freq, start, end, periods, copy, name, tz, verify_integrity, normalize, **kwds)
202
203 if data is None and offset is None:
--> 204 raise ValueError("Must provide freq argument if no data is "
205 "supplied")
206
ValueError: Must provide freq argument if no data is supplied
|
ValueError
|
def __new__(
cls, data=None, freq=None, start=None, end=None, periods=None, copy=False, name=None
):
if isinstance(freq, Period):
freq = freq.freq
else:
freq = _freq_mod.get_standard_freq(freq)
if data is None:
subarr, freq = _get_ordinal_range(start, end, periods, freq)
subarr = subarr.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError(
"PeriodIndex() must be called with a "
"collection of some kind, %s was passed" % repr(data)
)
elif isinstance(data, Period):
raise ValueError("Data must be array of dates, strings, or Period objects")
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype="i8")
except:
data = np.array(data, dtype="O")
if freq is None:
raise ValueError("freq cannot be none")
data = _period_unbox_array(data, check=freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)
base2, mult2 = _gfc(freq)
data = lib.period_asfreq_arr(
data.values, base1, mult1, base2, mult2, b"E"
)
else:
if freq is None:
raise ValueError("freq cannot be none")
if data.dtype == np.datetime64:
data = dt64arr_to_periodarr(data, freq)
elif data.dtype == np.int64:
pass
else:
data = data.astype("i8")
data = np.array(data, dtype=np.int64, copy=False)
if (data <= 0).any():
raise ValueError("Found illegal (<= 0) values in data")
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
|
def __new__(
cls, data=None, freq=None, start=None, end=None, periods=None, copy=False, name=None
):
if isinstance(freq, Period):
freq = freq.freq
else:
freq = _freq_mod.get_standard_freq(freq)
if data is None:
subarr, freq = _get_ordinal_range(start, end, periods, freq)
subarr = subarr.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError(
"PeriodIndex() must be called with a "
"collection of some kind, %s was passed" % repr(data)
)
if isinstance(data, Period):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype="i8")
except:
data = np.array(data, dtype="O")
if freq is None:
raise ValueError("freq cannot be none")
data = _period_unbox_array(data, check=freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)
base2, mult2 = _gfc(freq)
data = lib.period_asfreq_arr(
data.values, base1, mult1, base2, mult2, b"E"
)
else:
if freq is None:
raise ValueError("freq cannot be none")
if data.dtype == np.datetime64:
data = dt64arr_to_periodarr(data, freq)
elif data.dtype == np.int64:
pass
else:
data = data.astype("i8")
data = np.array(data, dtype=np.int64, copy=False)
if (data <= 0).any():
raise ValueError("Found illegal (<= 0) values in data")
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
|
https://github.com/pandas-dev/pandas/issues/1118
|
In [10]: PeriodIndex(val, periods=20)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/Users/wesm/code/pandas/<ipython-input-10-e39b42220b85> in <module>()
----> 1 PeriodIndex(val, periods=20)
/Users/wesm/code/pandas/pandas/tseries/period.pyc in __new__(cls, data, freq, start, end, periods, copy, name)
568
569 if freq is None:
--> 570 raise ValueError('freq cannot be none')
571
572 data = _period_unbox_array(data, check=freq)
ValueError: freq cannot be none
In [11]: val
Out[11]: Period('02-Apr-2012', 'B')
|
ValueError
|
def __new__(
cls, data=None, freq=None, start=None, end=None, periods=None, copy=False, name=None
):
if isinstance(freq, Period):
freq = freq.freq
else:
freq = _freq_mod.get_standard_freq(freq)
if data is None:
subarr, freq = _get_ordinal_range(start, end, periods, freq)
subarr = subarr.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError(
"PeriodIndex() must be called with a "
"collection of some kind, %s was passed" % repr(data)
)
elif isinstance(data, Period):
raise ValueError("Data must be array of dates, strings, or Period objects")
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype="i8")
except:
data = np.array(data, dtype="O")
if freq is None and len(data) > 0:
freq = getattr(data[0], "freq")
if freq is None:
raise ValueError(
("freq not specified and cannot be inferred from first element")
)
data = _period_unbox_array(data, check=freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)
base2, mult2 = _gfc(freq)
data = lib.period_asfreq_arr(
data.values, base1, mult1, base2, mult2, b"E"
)
else:
if freq is None and len(data) > 0:
freq = getattr(data[0], "freq")
if freq is None:
raise ValueError(
("freq not specified and cannot be inferred from first element")
)
if data.dtype == np.datetime64:
data = dt64arr_to_periodarr(data, freq)
elif data.dtype == np.int64:
pass
else:
try:
data = data.astype("i8")
except:
data = data.astype("O")
data = _period_unbox_array(data, check=freq)
data = np.array(data, dtype=np.int64, copy=False)
if (data <= 0).any():
raise ValueError("Found illegal (<= 0) values in data")
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
|
def __new__(
cls, data=None, freq=None, start=None, end=None, periods=None, copy=False, name=None
):
if isinstance(freq, Period):
freq = freq.freq
else:
freq = _freq_mod.get_standard_freq(freq)
if data is None:
subarr, freq = _get_ordinal_range(start, end, periods, freq)
subarr = subarr.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError(
"PeriodIndex() must be called with a "
"collection of some kind, %s was passed" % repr(data)
)
elif isinstance(data, Period):
raise ValueError("Data must be array of dates, strings, or Period objects")
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype="i8")
except:
data = np.array(data, dtype="O")
if freq is None:
raise ValueError("freq cannot be none")
data = _period_unbox_array(data, check=freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)
base2, mult2 = _gfc(freq)
data = lib.period_asfreq_arr(
data.values, base1, mult1, base2, mult2, b"E"
)
else:
if freq is None:
raise ValueError("freq cannot be none")
if data.dtype == np.datetime64:
data = dt64arr_to_periodarr(data, freq)
elif data.dtype == np.int64:
pass
else:
data = data.astype("i8")
data = np.array(data, dtype=np.int64, copy=False)
if (data <= 0).any():
raise ValueError("Found illegal (<= 0) values in data")
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
|
https://github.com/pandas-dev/pandas/issues/1118
|
In [10]: PeriodIndex(val, periods=20)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/Users/wesm/code/pandas/<ipython-input-10-e39b42220b85> in <module>()
----> 1 PeriodIndex(val, periods=20)
/Users/wesm/code/pandas/pandas/tseries/period.pyc in __new__(cls, data, freq, start, end, periods, copy, name)
568
569 if freq is None:
--> 570 raise ValueError('freq cannot be none')
571
572 data = _period_unbox_array(data, check=freq)
ValueError: freq cannot be none
In [11]: val
Out[11]: Period('02-Apr-2012', 'B')
|
ValueError
|
def __new__(
cls,
data=None,
freq=None,
start=None,
end=None,
periods=None,
dtype=None,
copy=False,
name=None,
tz=None,
verify_integrity=True,
normalize=False,
**kwds,
):
warn = False
if "offset" in kwds and kwds["offset"]:
freq = kwds["offset"]
warn = True
if not isinstance(freq, datetools.DateOffset):
freq = datetools.to_offset(freq)
if warn:
import warnings
warnings.warn(
"parameter 'offset' is deprecated, please use 'freq' instead", FutureWarning
)
if isinstance(freq, basestring):
freq = datetools.get_offset(freq)
else:
if isinstance(freq, basestring):
freq = datetools.to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is supplied")
if data is None:
_normalized = True
if start is not None:
start = Timestamp(start)
if not isinstance(start, Timestamp):
raise ValueError("Failed to convert %s to timestamp" % start)
if normalize:
start = datetools.normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not isinstance(end, Timestamp):
raise ValueError("Failed to convert %s to timestamp" % end)
if normalize:
end = datetools.normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (
offset._should_cache()
and not (offset._normalize_cache and not _normalized)
and datetools._naive_in_cache_range(start, end)
):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
index = _generate_regular_range(start, end, periods, offset)
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError(
"DatetimeIndex() must be called with a "
"collection of some kind, %s was passed" % repr(data)
)
if isinstance(data, datetime):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype="O")
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data)
else:
data = np.asarray(data, dtype="M8[us]")
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype="M8[us]", copy=copy)
elif issubclass(data.dtype.type, np.datetime64):
subarr = np.array(data, dtype="M8[us]", copy=copy)
else:
subarr = np.array(data, dtype="M8[us]", copy=copy)
# TODO: this is horribly inefficient. If user passes data + offset, we
# need to make sure data points conform. Punting on this
if verify_integrity:
if offset is not None:
for i, ts in enumerate(subarr):
if not offset.onOffset(Timestamp(ts)):
val = Timestamp(offset.rollforward(ts)).value
subarr[i] = val
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
return subarr
|
def __new__(
cls,
data=None,
freq=None,
start=None,
end=None,
periods=None,
dtype=None,
copy=False,
name=None,
tz=None,
verify_integrity=True,
normalize=False,
**kwds,
):
warn = False
if "offset" in kwds and kwds["offset"]:
freq = kwds["offset"]
warn = True
if not isinstance(freq, datetools.DateOffset):
freq = datetools.to_offset(freq)
if warn:
import warnings
warnings.warn(
"parameter 'offset' is deprecated, please use 'freq' instead", FutureWarning
)
if isinstance(freq, basestring):
freq = datetools.get_offset(freq)
else:
if isinstance(freq, basestring):
freq = datetools.to_offset(freq)
offset = freq
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is supplied")
if data is None:
_normalized = True
if start is not None:
start = Timestamp(start)
if not isinstance(start, Timestamp):
raise ValueError("Failed to convert %s to timestamp" % start)
if normalize:
start = datetools.normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
end = Timestamp(end)
if not isinstance(end, Timestamp):
raise ValueError("Failed to convert %s to timestamp" % end)
if normalize:
end = datetools.normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
start, end, tz = tools._figure_out_timezone(start, end, tz)
if (
offset._should_cache()
and not (offset._normalize_cache and not _normalized)
and datetools._naive_in_cache_range(start, end)
):
index = cls._cached_range(
start, end, periods=periods, offset=offset, name=name
)
else:
if isinstance(offset, datetools.Tick):
if periods is None:
b, e = Timestamp(start), Timestamp(end)
data = np.arange(
b.value, e.value + 1, offset.us_stride(), dtype=np.int64
)
else:
b = Timestamp(start)
e = b.value + periods * offset.us_stride()
data = np.arange(b.value, e, offset.us_stride(), dtype=np.int64)
else:
xdr = datetools.generate_range(
start=start, end=end, periods=periods, offset=offset
)
data = _to_m8_array(list(xdr))
index = np.array(data, dtype=np.datetime64, copy=False)
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
return index
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError(
"DatetimeIndex() must be called with a "
"collection of some kind, %s was passed" % repr(data)
)
if isinstance(data, datetime):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype="O")
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data)
else:
data = np.asarray(data, dtype="M8[us]")
if issubclass(data.dtype.type, basestring):
subarr = _str_to_dt_array(data)
elif issubclass(data.dtype.type, np.integer):
subarr = np.array(data, dtype="M8[us]", copy=copy)
elif issubclass(data.dtype.type, np.datetime64):
subarr = np.array(data, dtype="M8[us]", copy=copy)
else:
subarr = np.array(data, dtype="M8[us]", copy=copy)
# TODO: this is horribly inefficient. If user passes data + offset, we
# need to make sure data points conform. Punting on this
if verify_integrity:
if offset is not None:
for i, ts in enumerate(subarr):
if not offset.onOffset(Timestamp(ts)):
val = Timestamp(offset.rollforward(ts)).value
subarr[i] = val
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
return subarr
|
https://github.com/pandas-dev/pandas/issues/1095
|
======================================================================
ERROR: test_shift_multiple_of_same_base (__main__.TestLegacySupport)
----------------------------------------------------------------------
Traceback (most recent call last):
File "pandas/tests/test_timeseries.py", line 624, in test_shift_multiple_of_same_base
ts = Series(np.random.randn(5), index=date_range('1/1/2000', freq='H'))
File "/home/wesm/code/pandas/pandas/core/daterange.py", line 65, in date_range
freq=freq, tz=tz, normalize=normalize)
File "/home/wesm/code/pandas/pandas/core/index.py", line 1279, in __new__
b, e = Timestamp(start), Timestamp(end)
File "datetime.pyx", line 59, in pandas._tseries.Timestamp.__new__ (pandas/src/tseries.c:27720)
AttributeError: 'NoneType' object has no attribute 'dtval'
----------------------------------------------------------------------
|
AttributeError
|
def generate_range(start=None, end=None, periods=None, offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments
Parameters
----------
start : datetime (default None)
end : datetime (default None)
periods : int, optional
Note
----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
if end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if _count_not_none(start, end, periods) < 2:
raise ValueError("Must specify 2 of start, end, periods")
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
next_date = cur
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError("Offset %s did not increment date" % offset)
cur = next_date
|
def generate_range(start=None, end=None, periods=None, offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments
Parameters
----------
start : datetime (default None)
end : datetime (default None)
periods : int, optional
Note
----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
if end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
next_date = cur
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError("Offset %s did not increment date" % offset)
cur = next_date
|
https://github.com/pandas-dev/pandas/issues/1095
|
======================================================================
ERROR: test_shift_multiple_of_same_base (__main__.TestLegacySupport)
----------------------------------------------------------------------
Traceback (most recent call last):
File "pandas/tests/test_timeseries.py", line 624, in test_shift_multiple_of_same_base
ts = Series(np.random.randn(5), index=date_range('1/1/2000', freq='H'))
File "/home/wesm/code/pandas/pandas/core/daterange.py", line 65, in date_range
freq=freq, tz=tz, normalize=normalize)
File "/home/wesm/code/pandas/pandas/core/index.py", line 1279, in __new__
b, e = Timestamp(start), Timestamp(end)
File "datetime.pyx", line 59, in pandas._tseries.Timestamp.__new__ (pandas/src/tseries.c:27720)
AttributeError: 'NoneType' object has no attribute 'dtval'
----------------------------------------------------------------------
|
AttributeError
|
def _get_handle(path, mode, encoding=None):
if py3compat.PY3: # pragma: no cover
if encoding:
f = open(path, mode, encoding=encoding)
else:
f = open(path, mode, errors="replace")
else:
f = open(path, mode)
return f
|
def _get_handle(path, mode, encoding=None):
if py3compat.PY3: # pragma: no cover
f = open(path, mode, encoding=encoding)
else:
f = open(path, mode)
return f
|
https://github.com/pandas-dev/pandas/issues/795
|
In [9]: df = read_clipboard(header=None, sep='\s+')
In [10]: df
Out[10]: ---------------------------------------------------------------------------
UnicodeDecodeError Traceback (most recent call last)
/Users/wesm/<ipython-input-10-7ed0097d7e9e> in <module>()
----> 1 df
/Users/wesm/code/repos/ipython/IPython/core/displayhook.pyc in __call__(self, result)
236 self.start_displayhook()
237 self.write_output_prompt()
--> 238 format_dict = self.compute_format_data(result)
239 self.write_format_data(format_dict)
240 self.update_user_ns(result)
/Users/wesm/code/repos/ipython/IPython/core/displayhook.pyc in compute_format_data(self, result)
148 MIME type representation of the object.
149 """
--> 150 return self.shell.display_formatter.format(result)
151
152 def write_format_data(self, format_dict):
/Users/wesm/code/repos/ipython/IPython/core/formatters.pyc in format(self, obj, include, exclude)
124 continue
125 try:
--> 126 data = formatter(obj)
127 except:
128 # FIXME: log the exception
/Users/wesm/code/repos/ipython/IPython/core/formatters.pyc in __call__(self, obj)
445 type_pprinters=self.type_printers,
446 deferred_pprinters=self.deferred_printers)
--> 447 printer.pretty(obj)
448 printer.flush()
449 return stream.getvalue()
/Users/wesm/code/repos/ipython/IPython/lib/pretty.pyc in pretty(self, obj)
349 if hasattr(obj_class, '_repr_pretty_'):
350 return obj_class._repr_pretty_(obj, self, cycle)
--> 351 return _default_pprint(obj, self, cycle)
352 finally:
353 self.end_group()
/Users/wesm/code/repos/ipython/IPython/lib/pretty.pyc in _default_pprint(obj, p, cycle)
469 if getattr(klass, '__repr__', None) not in _baseclass_reprs:
470 # A user-provided repr.
--> 471 p.text(repr(obj))
472 return
473 p.begin_group(1, '<')
/Users/wesm/code/pandas/pandas/core/frame.pyc in __repr__(self)
458 self.info(buf=buf, verbose=self._verbose_info)
459 else:
--> 460 self.to_string(buf=buf)
461 value = buf.getvalue()
462 if max([len(l) for l in value.split('\n')]) > terminal_width:
/Users/wesm/code/pandas/pandas/core/frame.pyc in to_string(self, buf, columns, col_space, colSpace, header, index, na_rep, formatters, float_format, sparsify, nanRep, index_names, justify, force_unicode)
1038 index_names=index_names,
1039 header=header, index=index)
-> 1040 formatter.to_string(force_unicode=force_unicode)
1041
1042 if buf is None:
/Users/wesm/code/pandas/pandas/core/format.pyc in to_string(self, force_unicode)
193
194 if self.index:
--> 195 to_write.append(adjoin(1, str_index, *stringified))
196 else:
197 to_write.append(adjoin(1, *stringified))
/Users/wesm/code/pandas/pandas/core/common.pyc in adjoin(space, *lists)
398 toJoin = zip(*newLists)
399 for lines in toJoin:
--> 400 outLines.append(''.join(lines))
401 return '\n'.join(outLines)
402
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 1: ordinal not in range(128)
|
UnicodeDecodeError
|
def _getitem_lowerdim(self, tup):
from pandas.core.frame import DataFrame
ax0 = self.obj._get_axis(0)
# a bit kludgy
if isinstance(ax0, MultiIndex):
try:
return self._get_label(tup, axis=0)
except TypeError:
# slices are unhashable
pass
except Exception:
if isinstance(tup[0], slice):
raise IndexingError
if tup[0] not in ax0:
raise
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
# df.ix[0, ['C', 'B', A']] -> rows first (False)
for i, key in enumerate(tup):
if _is_label_like(key):
section = self._getitem_axis(key, axis=i)
# might have been a MultiIndex
if section.ndim == self.ndim:
new_key = tup[:i] + (_NS,) + tup[i + 1 :]
# new_key = tup[:i] + tup[i+1:]
else:
new_key = tup[:i] + tup[i + 1 :]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if isinstance(section, DataFrame) and i > 0 and len(new_key) == 2:
a, b = new_key
new_key = b, a
if len(new_key) == 1:
(new_key,) = new_key
return section.ix[new_key]
raise IndexingError("not applicable")
|
def _getitem_lowerdim(self, tup):
from pandas.core.frame import DataFrame
ax0 = self.obj._get_axis(0)
# a bit kludgy
if isinstance(ax0, MultiIndex):
try:
return self._get_label(tup, axis=0)
except TypeError:
# slices are unhashable
pass
except Exception:
if tup[0] not in ax0:
raise
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
# df.ix[0, ['C', 'B', A']] -> rows first (False)
for i, key in enumerate(tup):
if _is_label_like(key):
section = self._getitem_axis(key, axis=i)
# might have been a MultiIndex
if section.ndim == self.ndim:
new_key = tup[:i] + (_NS,) + tup[i + 1 :]
# new_key = tup[:i] + tup[i+1:]
else:
new_key = tup[:i] + tup[i + 1 :]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if isinstance(section, DataFrame) and i > 0 and len(new_key) == 2:
a, b = new_key
new_key = b, a
if len(new_key) == 1:
(new_key,) = new_key
return section.ix[new_key]
raise IndexingError("not applicable")
|
https://github.com/pandas-dev/pandas/issues/709
|
In [20]: df
Out[20]:
value
id
t1 a 1
b 2
c 3
t2 a 7
b 8
In [21]: df.ix[:, 'value']
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/home/wesm/Dropbox/book/svn/<ipython-input-21-7d089958dc90> in <module>()
----> 1 df.ix[:, 'value']
/home/wesm/code/pandas/pandas/core/indexing.pyc in __getitem__(self, key)
28 pass
29
---> 30 return self._getitem_tuple(key)
31 else:
32 return self._getitem_axis(key, axis=0)
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_tuple(self, tup)
97 def _getitem_tuple(self, tup):
98 try:
---> 99 return self._getitem_lowerdim(tup)
100 except IndexingError:
101 pass
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_lowerdim(self, tup)
126 pass
127 except Exception:
--> 128 if tup[0] not in ax0:
129 raise
130
/home/wesm/code/pandas/pandas/core/index.pyc in __contains__(self, key)
209
210 def __contains__(self, key):
--> 211 return key in self._engine
212
213 def __hash__(self):
/home/wesm/code/pandas/pandas/_engines.so in pandas._engines.DictIndexEngine.__contains__ (pandas/src/engines.c:1958)()
TypeError: unhashable type
|
TypeError
|
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
|
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
|
https://github.com/pandas-dev/pandas/issues/709
|
In [20]: df
Out[20]:
value
id
t1 a 1
b 2
c 3
t2 a 7
b 8
In [21]: df.ix[:, 'value']
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/home/wesm/Dropbox/book/svn/<ipython-input-21-7d089958dc90> in <module>()
----> 1 df.ix[:, 'value']
/home/wesm/code/pandas/pandas/core/indexing.pyc in __getitem__(self, key)
28 pass
29
---> 30 return self._getitem_tuple(key)
31 else:
32 return self._getitem_axis(key, axis=0)
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_tuple(self, tup)
97 def _getitem_tuple(self, tup):
98 try:
---> 99 return self._getitem_lowerdim(tup)
100 except IndexingError:
101 pass
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_lowerdim(self, tup)
126 pass
127 except Exception:
--> 128 if tup[0] not in ax0:
129 raise
130
/home/wesm/code/pandas/pandas/core/index.pyc in __contains__(self, key)
209
210 def __contains__(self, key):
--> 211 return key in self._engine
212
213 def __hash__(self):
/home/wesm/code/pandas/pandas/_engines.so in pandas._engines.DictIndexEngine.__contains__ (pandas/src/engines.c:1958)()
TypeError: unhashable type
|
TypeError
|
def _convert_to_indexer(self, obj, axis=0):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
"In the face of ambiguity, refuse the temptation to guess."
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
is_int_index = _is_integer_index(labels)
if com.is_integer(obj) and not is_int_index:
return obj
try:
return labels.get_loc(obj)
except (KeyError, TypeError):
pass
if isinstance(obj, slice):
int_slice = _is_index_slice(obj)
null_slice = obj.start is None and obj.stop is None
# could have integers in the first level of the MultiIndex
position_slice = (
int_slice
and not labels.inferred_type == "integer"
and not isinstance(labels, MultiIndex)
)
if null_slice or position_slice:
slicer = obj
else:
try:
i, j = labels.slice_locs(obj.start, obj.stop)
slicer = slice(i, j, obj.step)
except Exception:
if _is_index_slice(obj):
if labels.inferred_type == "integer":
raise
slicer = obj
else:
raise
return slicer
elif _is_list_like(obj):
if com._is_bool_indexer(obj):
objarr = _check_bool_indexer(labels, obj)
return objarr
else:
objarr = _asarray_tuplesafe(obj)
# If have integer labels, defer to label-based indexing
if _is_integer_dtype(objarr) and not is_int_index:
return objarr
indexer = labels.get_indexer(objarr)
mask = indexer == -1
if mask.any():
raise KeyError("%s not in index" % objarr[mask])
return indexer
else:
return labels.get_loc(obj)
|
def _convert_to_indexer(self, obj, axis=0):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
"In the face of ambiguity, refuse the temptation to guess."
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
try:
return labels.get_loc(obj)
except (KeyError, TypeError):
pass
is_int_index = _is_integer_index(labels)
if isinstance(obj, slice):
int_slice = _is_index_slice(obj)
null_slice = obj.start is None and obj.stop is None
# could have integers in the first level of the MultiIndex
position_slice = (
int_slice
and not labels.inferred_type == "integer"
and not isinstance(labels, MultiIndex)
)
if null_slice or position_slice:
slicer = obj
else:
try:
i, j = labels.slice_locs(obj.start, obj.stop)
slicer = slice(i, j, obj.step)
except Exception:
if _is_index_slice(obj):
if labels.inferred_type == "integer":
raise
slicer = obj
else:
raise
return slicer
elif _is_list_like(obj):
if com._is_bool_indexer(obj):
objarr = _check_bool_indexer(labels, obj)
return objarr
else:
objarr = _asarray_tuplesafe(obj)
# If have integer labels, defer to label-based indexing
if _is_integer_dtype(objarr) and not is_int_index:
return objarr
indexer = labels.get_indexer(objarr)
mask = indexer == -1
if mask.any():
raise KeyError("%s not in index" % objarr[mask])
return indexer
else:
if com.is_integer(obj) and not is_int_index:
return obj
return labels.get_loc(obj)
|
https://github.com/pandas-dev/pandas/issues/709
|
In [20]: df
Out[20]:
value
id
t1 a 1
b 2
c 3
t2 a 7
b 8
In [21]: df.ix[:, 'value']
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/home/wesm/Dropbox/book/svn/<ipython-input-21-7d089958dc90> in <module>()
----> 1 df.ix[:, 'value']
/home/wesm/code/pandas/pandas/core/indexing.pyc in __getitem__(self, key)
28 pass
29
---> 30 return self._getitem_tuple(key)
31 else:
32 return self._getitem_axis(key, axis=0)
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_tuple(self, tup)
97 def _getitem_tuple(self, tup):
98 try:
---> 99 return self._getitem_lowerdim(tup)
100 except IndexingError:
101 pass
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_lowerdim(self, tup)
126 pass
127 except Exception:
--> 128 if tup[0] not in ax0:
129 raise
130
/home/wesm/code/pandas/pandas/core/index.pyc in __contains__(self, key)
209
210 def __contains__(self, key):
--> 211 return key in self._engine
212
213 def __hash__(self):
/home/wesm/code/pandas/pandas/_engines.so in pandas._engines.DictIndexEngine.__contains__ (pandas/src/engines.c:1958)()
TypeError: unhashable type
|
TypeError
|
def unique(self):
"""
Return array of unique values in the Series. Significantly faster than
numpy.unique
Returns
-------
uniques : ndarray
"""
values = self.values
if issubclass(values.dtype.type, np.floating):
table = lib.Float64HashTable(len(values))
uniques = np.array(table.unique(values), dtype="f8")
else:
if not values.dtype == np.object_:
values = values.astype("O")
table = lib.PyObjectHashTable(len(values))
uniques = lib.list_to_object_array(table.unique(values))
uniques = lib.maybe_convert_objects(uniques)
return uniques
|
def unique(self):
"""
Return array of unique values in the Series. Significantly faster than
numpy.unique
Returns
-------
uniques : ndarray
"""
values = self.values
if not values.dtype == np.object_:
values = values.astype("O")
table = lib.PyObjectHashTable(len(values))
uniques = lib.list_to_object_array(table.unique(values))
return lib.maybe_convert_objects(uniques)
|
https://github.com/pandas-dev/pandas/issues/709
|
In [20]: df
Out[20]:
value
id
t1 a 1
b 2
c 3
t2 a 7
b 8
In [21]: df.ix[:, 'value']
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/home/wesm/Dropbox/book/svn/<ipython-input-21-7d089958dc90> in <module>()
----> 1 df.ix[:, 'value']
/home/wesm/code/pandas/pandas/core/indexing.pyc in __getitem__(self, key)
28 pass
29
---> 30 return self._getitem_tuple(key)
31 else:
32 return self._getitem_axis(key, axis=0)
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_tuple(self, tup)
97 def _getitem_tuple(self, tup):
98 try:
---> 99 return self._getitem_lowerdim(tup)
100 except IndexingError:
101 pass
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_lowerdim(self, tup)
126 pass
127 except Exception:
--> 128 if tup[0] not in ax0:
129 raise
130
/home/wesm/code/pandas/pandas/core/index.pyc in __contains__(self, key)
209
210 def __contains__(self, key):
--> 211 return key in self._engine
212
213 def __hash__(self):
/home/wesm/code/pandas/pandas/_engines.so in pandas._engines.DictIndexEngine.__contains__ (pandas/src/engines.c:1958)()
TypeError: unhashable type
|
TypeError
|
def from_csv(
cls, path, sep=",", parse_dates=True, header=None, index_col=0, encoding=None
):
"""
Read delimited file into Series
Parameters
----------
path : string
sep : string, default ','
Field delimiter
parse_dates : boolean, default True
Parse dates. Different default from read_table
header : int, default 0
Row to use at header (skip prior rows)
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
Returns
-------
y : Series
"""
from pandas.core.frame import DataFrame
df = DataFrame.from_csv(
path,
header=header,
index_col=index_col,
sep=sep,
parse_dates=parse_dates,
encoding=encoding,
)
return df.ix[:, 0]
|
def from_csv(cls, path, sep=",", parse_dates=True):
"""
Read delimited file into Series
Parameters
----------
path : string
sep : string, default ','
Field delimiter
parse_dates : boolean, default True
Parse dates. Different default from read_table
Returns
-------
y : Series
"""
from pandas.core.frame import DataFrame
df = DataFrame.from_csv(path, header=None, sep=sep, parse_dates=parse_dates)
return df[df.columns[0]]
|
https://github.com/pandas-dev/pandas/issues/709
|
In [20]: df
Out[20]:
value
id
t1 a 1
b 2
c 3
t2 a 7
b 8
In [21]: df.ix[:, 'value']
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/home/wesm/Dropbox/book/svn/<ipython-input-21-7d089958dc90> in <module>()
----> 1 df.ix[:, 'value']
/home/wesm/code/pandas/pandas/core/indexing.pyc in __getitem__(self, key)
28 pass
29
---> 30 return self._getitem_tuple(key)
31 else:
32 return self._getitem_axis(key, axis=0)
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_tuple(self, tup)
97 def _getitem_tuple(self, tup):
98 try:
---> 99 return self._getitem_lowerdim(tup)
100 except IndexingError:
101 pass
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_lowerdim(self, tup)
126 pass
127 except Exception:
--> 128 if tup[0] not in ax0:
129 raise
130
/home/wesm/code/pandas/pandas/core/index.pyc in __contains__(self, key)
209
210 def __contains__(self, key):
--> 211 return key in self._engine
212
213 def __hash__(self):
/home/wesm/code/pandas/pandas/_engines.so in pandas._engines.DictIndexEngine.__contains__ (pandas/src/engines.c:1958)()
TypeError: unhashable type
|
TypeError
|
def to_csv(
self,
path,
index=True,
sep=",",
na_rep="",
header=False,
index_label=None,
mode="w",
nanRep=None,
encoding=None,
):
"""
Write Series to a comma-separated values (csv) file
Parameters
----------
path : string
File path
nanRep : string, default ''
Missing data rep'n
header : boolean, default False
Write out series name
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
mode : Python write mode, default 'w'
sep : character, default ","
Field delimiter for the output file.
encoding : string, optional
a string representing the encoding to use if the contents are
non-ascii, for python versions prior to 3
"""
from pandas.core.frame import DataFrame
df = DataFrame(self)
df.to_csv(
path,
index=index,
sep=sep,
na_rep=na_rep,
header=header,
index_label=index_label,
mode=mode,
nanRep=nanRep,
encoding=encoding,
)
|
def to_csv(self, path, index=True):
"""
Write the Series to a CSV file
Parameters
----------
path : string or None
Output filepath. If None, write to stdout
index : bool, optional
Include the index as row names or not
"""
f = open(path, "w")
csvout = csv.writer(f, lineterminator="\n")
csvout.writerows(self.iteritems(index))
f.close()
|
https://github.com/pandas-dev/pandas/issues/709
|
In [20]: df
Out[20]:
value
id
t1 a 1
b 2
c 3
t2 a 7
b 8
In [21]: df.ix[:, 'value']
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/home/wesm/Dropbox/book/svn/<ipython-input-21-7d089958dc90> in <module>()
----> 1 df.ix[:, 'value']
/home/wesm/code/pandas/pandas/core/indexing.pyc in __getitem__(self, key)
28 pass
29
---> 30 return self._getitem_tuple(key)
31 else:
32 return self._getitem_axis(key, axis=0)
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_tuple(self, tup)
97 def _getitem_tuple(self, tup):
98 try:
---> 99 return self._getitem_lowerdim(tup)
100 except IndexingError:
101 pass
/home/wesm/code/pandas/pandas/core/indexing.pyc in _getitem_lowerdim(self, tup)
126 pass
127 except Exception:
--> 128 if tup[0] not in ax0:
129 raise
130
/home/wesm/code/pandas/pandas/core/index.pyc in __contains__(self, key)
209
210 def __contains__(self, key):
--> 211 return key in self._engine
212
213 def __hash__(self):
/home/wesm/code/pandas/pandas/_engines.so in pandas._engines.DictIndexEngine.__contains__ (pandas/src/engines.c:1958)()
TypeError: unhashable type
|
TypeError
|
def _convert_to_indexer(self, obj, axis=0):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
"In the face of ambiguity, refuse the temptation to guess."
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
index = self.obj._get_axis(axis)
try:
return index.get_loc(obj)
except (KeyError, TypeError):
pass
is_int_index = _is_integer_index(index)
if isinstance(obj, slice):
if _is_label_slice(index, obj):
i, j = index.slice_locs(obj.start, obj.stop)
if obj.step is not None:
raise Exception("Non-zero step not supported with label-based slicing")
return slice(i, j)
else:
return obj
elif _is_list_like(obj):
objarr = _asarray_tuplesafe(obj)
if objarr.dtype == np.bool_:
if not obj.index.equals(index):
raise IndexingError(
"Cannot use boolean index with misaligned or unequal labels"
)
return objarr
else:
# If have integer labels, defer to label-based indexing
if _is_integer_dtype(objarr) and not is_int_index:
return objarr
indexer = index.get_indexer(objarr)
mask = indexer == -1
if mask.any():
raise KeyError("%s not in index" % objarr[mask])
return indexer
else:
if _is_int_like(obj) and not is_int_index:
return obj
return index.get_loc(obj)
|
def _convert_to_indexer(self, obj, axis=0):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
"In the face of ambiguity, refuse the temptation to guess."
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
index = self.obj._get_axis(axis)
is_int_index = _is_integer_index(index)
if isinstance(obj, slice):
if _is_label_slice(index, obj):
i, j = index.slice_locs(obj.start, obj.stop)
if obj.step is not None:
raise Exception("Non-zero step not supported with label-based slicing")
return slice(i, j)
else:
return obj
elif _is_list_like(obj):
objarr = _asarray_tuplesafe(obj)
if objarr.dtype == np.bool_:
if not obj.index.equals(index):
raise IndexingError(
"Cannot use boolean index with misaligned or unequal labels"
)
return objarr
else:
# If have integer labels, defer to label-based indexing
if _is_integer_dtype(objarr) and not is_int_index:
return objarr
indexer = index.get_indexer(objarr)
mask = indexer == -1
if mask.any():
raise KeyError("%s not in index" % objarr[mask])
return indexer
else:
if _is_int_like(obj) and not is_int_index:
return obj
return index.get_loc(obj)
|
https://github.com/pandas-dev/pandas/issues/551
|
Hi, I am currently trying to access and change elements of a multiindexed DataFrame with .ix. This works on read but not on write:
In [342]: arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
In [343]: tuples = zip(*arrays)
In [344]: index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
In [345]: a=DataFrame(randn(6, 3), index=index[:6])
In [346]: a.ix[('bar','two'),1]
Out[346]: -0.574310975217078
In [347]: a.ix[('bar','two'),1]=999
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/Users/c.prinoth/<ipython-input-347-755b7b16a7da> in <module>()
----> 1 a.ix[('bar','two'),1]=999
/usr/local/Cellar/python/2.7.2/lib/python2.7/site-packages/pandas/core/indexing.pyc in __setitem__(self, key, value)
56 raise IndexingError('only tuples of length <= %d supported',
57 self.ndim)
---> 58 indexer = self._convert_tuple(key)
59 else:
60 indexer = self._convert_to_indexer(key)
/usr/local/Cellar/python/2.7.2/lib/python2.7/site-packages/pandas/core/indexing.pyc in _convert_tuple(self, key)
65 keyidx = []
66 for i, k in enumerate(key):
---> 67 idx = self._convert_to_indexer(k, axis=i)
68 keyidx.append(idx)
69 return _maybe_convert_ix(*keyidx)
/usr/local/Cellar/python/2.7.2/lib/python2.7/site-packages/pandas/core/indexing.pyc in _convert_to_indexer(self, obj, axis)
239 mask = indexer == -1
240 if mask.any():
--> 241 raise KeyError('%s not in index' % objarr[mask])
242
243 return indexer
KeyError: '[bar two] not in index'
Is this a bug or am I using the index incorrectly?
|
KeyError
|
def unstack(self, level=-1):
"""
"Unstack" level from MultiLevel index to produce reshaped DataFrame
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to unstack, can pass level name
Examples
--------
>>> s
one a 1.
one b 2.
two a 3.
two b 4.
>>> s.unstack(level=-1)
a b
one 1. 2.
two 3. 4.
>>> s.unstack(level=0)
one two
a 1. 2.
b 3. 4.
Returns
-------
unstacked : DataFrame
"""
from pandas.core.reshape import unstack
if isinstance(level, (tuple, list)):
result = self
to_unstack = level
while to_unstack:
lev = to_unstack[0]
result = unstack(result, lev)
to_unstack = [
other - 1 if other > lev else other for other in to_unstack[1:]
]
return result
else:
return unstack(self, level)
|
def unstack(self, level=-1):
"""
"Unstack" level from MultiLevel index to produce reshaped DataFrame
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to unstack, can pass level name
Examples
--------
>>> s
one a 1.
one b 2.
two a 3.
two b 4.
>>> s.unstack(level=-1)
a b
one 1. 2.
two 3. 4.
>>> s.unstack(level=0)
one two
a 1. 2.
b 3. 4.
Returns
-------
unstacked : DataFrame
"""
from pandas.core.reshape import unstack
if isinstance(level, (tuple, list)):
result = self
for lev in level:
result = unstack(result, lev)
return result
else:
return unstack(self, level)
|
https://github.com/pandas-dev/pandas/issues/451
|
In [180]: df.unstack(level=[0,1])
Out[180]:
A A B B
bar foo bar foo
a b a b a b a b
0 266.666m 806.406m -1.627 -494.049m 246.100m 894.707m 1.301 120.371m
1 -1.319 638.371m 955.212m -55.806m 475.675m 409.483m 580.450m 169.247m
In [177]: df.unstack(level=[1,2])
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
...
IndexError: pop index out of range
In [178]: df.unstack(level=[1,1])
Out[178]:
A A B B
0 1 0 1
a b a b a b a b
bar 266.666m 806.406m -1.319 638.371m 246.100m 894.707m 475.675m 409.483m
foo -1.627 -494.049m 955.212m -55.806m 1.301 120.371m 580.450m 169.247m
|
IndexError
|
def set_index(self, col_or_cols, drop=True, inplace=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
col_or_cols : column label or list of column labels
drop : boolean, default True
Delete columns to be used as the new index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
Returns
-------
dataframe : DataFrame
"""
cols = col_or_cols
if not isinstance(col_or_cols, (list, tuple)):
cols = [col_or_cols]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
for col in cols:
level = frame[col]
if drop:
del frame[col]
arrays.append(level)
index = MultiIndex.from_arrays(arrays, names=cols)
if not index._verify_integrity():
duplicates = index._get_duplicates()
raise Exception("Index has duplicate keys: %s" % duplicates)
# clear up memory usage
index._cleanup()
frame.index = index
return frame
|
def set_index(self, col_or_cols, drop=True, inplace=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
col_or_cols : column label or list of column labels
drop : boolean, default True
Delete columns to be used as the new index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
Returns
-------
dataframe : DataFrame
"""
cols = col_or_cols
if not isinstance(col_or_cols, (list, tuple)):
cols = [col_or_cols]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
for col in cols:
level = frame[col]
if drop:
del frame[col]
arrays.append(level)
index = MultiIndex.from_arrays(arrays, names=cols)
if not index._verify_integrity():
duplicates = index._get_duplicates()
raise Exception("Index has duplicate keys: %s" % duplicates)
frame.index = index
return frame
|
https://github.com/pandas-dev/pandas/issues/366
|
df.as_matrix()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/frame.py", line 671, in as_matrix
self._consolidate_inplace()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/generic.py", line 267, in _consolidate_inplace
self._data = self._data.consolidate()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 516, in consolidate
new_blocks = _consolidate(self.blocks, self.items)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 938, in _consolidate
new_block = _merge_blocks(list(group_blocks), items)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 951, in _merge_blocks
do_integrity_check=True)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 211, in make_block
do_integrity_check=do_integrity_check)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 34, in __init__
self._check_integrity()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 40, in _check_integrity
return (self.ref_locs[1:] > self.ref_locs[:-1]).all()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 47, in ref_locs
assert((indexer != -1).all())
AssertionError
|
AssertionError
|
def unique_with_labels(values):
uniques = Index(lib.fast_unique(values))
labels = lib.get_unique_labels(values, uniques.indexMap)
uniques._cleanup()
return uniques, labels
|
def unique_with_labels(values):
uniques = Index(lib.fast_unique(values))
labels = lib.get_unique_labels(values, uniques.indexMap)
return uniques, labels
|
https://github.com/pandas-dev/pandas/issues/366
|
df.as_matrix()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/frame.py", line 671, in as_matrix
self._consolidate_inplace()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/generic.py", line 267, in _consolidate_inplace
self._data = self._data.consolidate()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 516, in consolidate
new_blocks = _consolidate(self.blocks, self.items)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 938, in _consolidate
new_block = _merge_blocks(list(group_blocks), items)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 951, in _merge_blocks
do_integrity_check=True)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 211, in make_block
do_integrity_check=do_integrity_check)
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 34, in __init__
self._check_integrity()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 40, in _check_integrity
return (self.ref_locs[1:] > self.ref_locs[:-1]).all()
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/pandas/core/internals.py", line 47, in ref_locs
assert((indexer != -1).all())
AssertionError
|
AssertionError
|
def from_blocks(cls, blocks, index):
# also checks for overlap
items = _union_block_items(blocks)
return BlockManager(blocks, [items, index])
|
def from_blocks(cls, blocks, index):
# also checks for overlap
items = _union_block_items(blocks)
ndim = blocks[0].ndim
return BlockManager(blocks, [items, index])
|
https://github.com/pandas-dev/pandas/issues/98
|
In [1]: df = DataFrame(index=['a', 'b', 'c'])
In [2]: df.ix['a']
ERROR: An unexpected error occurred while tokenizing input
The following traceback may be corrupted or invalid
The error message is: ('EOF in multi-line statement', (2, 0))
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
/home/wesm/code/pandas/<ipython-input-2-6181dcf1ffea> in <module>()
----> 1 df.ix['a']
/home/wesm/code/pandas/pandas/core/indexing.pyc in __getitem__(self, key)
102 return self._fancy_getitem(key, axis=0)
103 else:
--> 104 return self._fancy_getitem_axis(key, axis=0)
105
106 def __setitem__(self, key, value):
/home/wesm/code/pandas/pandas/core/indexing.pyc in _fancy_getitem_axis(self, key, axis)
200 return self.frame.xs(idx)
201 else:
--> 202 return self.frame.xs(idx, copy=False)
203 else:
204 col = key
/home/wesm/code/pandas/pandas/core/frame.pyc in xs(self, key, copy)
839
840 self._consolidate_inplace()
--> 841 values = self._data.xs(key, axis=1, copy=copy)
842 return Series(values.as_matrix(), index=self.columns)
843
/home/wesm/code/pandas/pandas/core/internals.pyc in xs(self, key, axis, copy)
451 new_blocks.append(newb)
452 else:
--> 453 vals = self.blocks[0].values[slicer]
454 if copy:
455 vals = vals.copy()
IndexError: list index out of range
|
IndexError
|
def xs(self, key, axis=1, copy=True):
assert axis >= 1
i = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = i
slicer = tuple(slicer)
new_axes = list(self.axes)
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
if not copy:
raise Exception(
"cannot get view of mixed-type or non-consolidated DataFrame"
)
for blk in self.blocks:
newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
new_blocks.append(newb)
elif len(self.blocks) == 1:
vals = self.blocks[0].values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(vals, self.items, self.items)]
return BlockManager(new_blocks, new_axes)
|
def xs(self, key, axis=1, copy=True):
from pandas.core.series import Series
assert axis >= 1
i = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = i
slicer = tuple(slicer)
new_axes = list(self.axes)
new_axes.pop(axis)
if len(self.blocks) > 1:
if not copy:
raise Exception(
"cannot get view of mixed-type or non-consolidated DataFrame"
)
new_blocks = []
for blk in self.blocks:
newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
new_blocks.append(newb)
else:
vals = self.blocks[0].values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(vals, self.items, self.items)]
return BlockManager(new_blocks, new_axes)
|
https://github.com/pandas-dev/pandas/issues/98
|
In [1]: df = DataFrame(index=['a', 'b', 'c'])
In [2]: df.ix['a']
ERROR: An unexpected error occurred while tokenizing input
The following traceback may be corrupted or invalid
The error message is: ('EOF in multi-line statement', (2, 0))
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
/home/wesm/code/pandas/<ipython-input-2-6181dcf1ffea> in <module>()
----> 1 df.ix['a']
/home/wesm/code/pandas/pandas/core/indexing.pyc in __getitem__(self, key)
102 return self._fancy_getitem(key, axis=0)
103 else:
--> 104 return self._fancy_getitem_axis(key, axis=0)
105
106 def __setitem__(self, key, value):
/home/wesm/code/pandas/pandas/core/indexing.pyc in _fancy_getitem_axis(self, key, axis)
200 return self.frame.xs(idx)
201 else:
--> 202 return self.frame.xs(idx, copy=False)
203 else:
204 col = key
/home/wesm/code/pandas/pandas/core/frame.pyc in xs(self, key, copy)
839
840 self._consolidate_inplace()
--> 841 values = self._data.xs(key, axis=1, copy=copy)
842 return Series(values.as_matrix(), index=self.columns)
843
/home/wesm/code/pandas/pandas/core/internals.pyc in xs(self, key, axis, copy)
451 new_blocks.append(newb)
452 else:
--> 453 vals = self.blocks[0].values[slicer]
454 if copy:
455 vals = vals.copy()
IndexError: list index out of range
|
IndexError
|
def sort(
graph: Mapping[K, DepGraphEntry[K, V, T]],
*,
allow_unresolved: bool = False,
) -> Tuple[V, ...]:
items = sort_ex(graph, allow_unresolved=allow_unresolved)
return tuple(i[1].item for i in items)
|
def sort(
graph: Mapping[K, DepGraphEntry[K, V, T]],
*,
allow_unresolved: bool = False,
) -> Iterator[V]:
items = sort_ex(graph, allow_unresolved=allow_unresolved)
return (i[1].item for i in items)
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def derive_view(
stype: s_types.Type,
*,
derived_name: Optional[sn.QualName] = None,
derived_name_quals: Optional[Sequence[str]] = (),
derived_name_base: Optional[str] = None,
preserve_shape: bool = False,
preserve_path_id: bool = False,
is_insert: bool = False,
is_update: bool = False,
is_delete: bool = False,
inheritance_merge: bool = True,
attrs: Optional[Dict[str, Any]] = None,
ctx: context.ContextLevel,
) -> s_types.Type:
if derived_name is None:
assert isinstance(stype, s_obj.DerivableObject)
derived_name = derive_view_name(
stype=stype,
derived_name_quals=derived_name_quals,
derived_name_base=derived_name_base,
ctx=ctx,
)
if is_insert:
exprtype = s_types.ExprType.Insert
elif is_update:
exprtype = s_types.ExprType.Update
elif is_delete:
exprtype = s_types.ExprType.Delete
else:
exprtype = s_types.ExprType.Select
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
attrs["expr_type"] = exprtype
derived: s_types.Type
if isinstance(stype, s_abc.Collection):
ctx.env.schema, derived = stype.derive_subtype(
ctx.env.schema,
name=derived_name,
attrs=attrs,
)
elif isinstance(stype, s_obj.DerivableInheritingObject):
ctx.env.schema, derived = stype.derive_subtype(
ctx.env.schema,
name=derived_name,
inheritance_merge=inheritance_merge,
inheritance_refdicts={"pointers"},
mark_derived=True,
transient=True,
preserve_path_id=preserve_path_id,
attrs=attrs,
)
if not stype.generic(ctx.env.schema) and isinstance(derived, s_sources.Source):
scls_pointers = stype.get_pointers(ctx.env.schema)
derived_own_pointers = derived.get_pointers(ctx.env.schema)
for pn, ptr in derived_own_pointers.items(ctx.env.schema):
# This is a view of a view. Make sure query-level
# computable expressions for pointers are carried over.
src_ptr = scls_pointers.get(ctx.env.schema, pn)
computable_data = ctx.source_map.get(src_ptr)
if computable_data is not None:
ctx.source_map[ptr] = computable_data
if src_ptr in ctx.env.pointer_specified_info:
ctx.env.pointer_derivation_map[src_ptr].append(ptr)
else:
raise TypeError("unsupported type in derive_view")
ctx.view_nodes[derived.get_name(ctx.env.schema)] = derived
if preserve_shape and stype in ctx.env.view_shapes:
ctx.env.view_shapes[derived] = ctx.env.view_shapes[stype]
ctx.env.created_schema_objects.add(derived)
return derived
|
def derive_view(
stype: s_types.Type,
*,
derived_name: Optional[sn.QualName] = None,
derived_name_quals: Optional[Sequence[str]] = (),
derived_name_base: Optional[str] = None,
preserve_shape: bool = False,
preserve_path_id: bool = False,
is_insert: bool = False,
is_update: bool = False,
is_delete: bool = False,
inheritance_merge: bool = True,
attrs: Optional[Dict[str, Any]] = None,
ctx: context.ContextLevel,
) -> s_types.Type:
if derived_name is None:
assert isinstance(stype, s_obj.DerivableObject)
derived_name = derive_view_name(
stype=stype,
derived_name_quals=derived_name_quals,
derived_name_base=derived_name_base,
ctx=ctx,
)
if is_insert:
exprtype = s_types.ExprType.Insert
elif is_update:
exprtype = s_types.ExprType.Update
elif is_delete:
exprtype = s_types.ExprType.Delete
else:
exprtype = s_types.ExprType.Select
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
attrs["expr_type"] = exprtype
derived: s_types.Type
if isinstance(stype, s_abc.Collection):
ctx.env.schema, derived = stype.derive_subtype(
ctx.env.schema, name=derived_name
)
elif isinstance(stype, s_obj.DerivableInheritingObject):
ctx.env.schema, derived = stype.derive_subtype(
ctx.env.schema,
name=derived_name,
inheritance_merge=inheritance_merge,
inheritance_refdicts={"pointers"},
mark_derived=True,
transient=True,
preserve_path_id=preserve_path_id,
attrs=attrs,
)
if not stype.generic(ctx.env.schema) and isinstance(derived, s_sources.Source):
scls_pointers = stype.get_pointers(ctx.env.schema)
derived_own_pointers = derived.get_pointers(ctx.env.schema)
for pn, ptr in derived_own_pointers.items(ctx.env.schema):
# This is a view of a view. Make sure query-level
# computable expressions for pointers are carried over.
src_ptr = scls_pointers.get(ctx.env.schema, pn)
computable_data = ctx.source_map.get(src_ptr)
if computable_data is not None:
ctx.source_map[ptr] = computable_data
if src_ptr in ctx.env.pointer_specified_info:
ctx.env.pointer_derivation_map[src_ptr].append(ptr)
else:
raise TypeError("unsupported type in derive_view")
ctx.view_nodes[derived.get_name(ctx.env.schema)] = derived
if preserve_shape and stype in ctx.env.view_shapes:
ctx.env.view_shapes[derived] = ctx.env.view_shapes[stype]
ctx.env.created_schema_objects.add(derived)
return derived
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def get_name(self, schema: s_schema.Schema) -> sn.QualName:
component_ids = sorted(str(t.get_name(schema)) for t in self.types)
nqname = f"({' | '.join(component_ids)})"
return sn.QualName(name=nqname, module="__derived__")
|
def get_name(self, schema: s_schema.Schema) -> sn.QualName:
return self.name
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def get(self, name: str, *, dummy: bool = False) -> GQLBaseType:
"""Get a special GQL type either by name or based on EdgeDB type."""
# normalize name and possibly add 'edb_base' to kwargs
edb_base = None
kwargs: Dict[str, Any] = {"dummy": dummy}
if not name.startswith("stdgraphql::"):
if edb_base is None:
if "::" in name:
edb_base = self.edb_schema.get(
name,
type=s_types.Type,
)
else:
for module in self.modules:
edb_base = self.edb_schema.get(
f"{module}::{name}",
type=s_types.Type,
default=None,
)
if edb_base:
break
# XXX: find a better way to do this
if edb_base is None:
edb_base = self.edb_schema.get_global(
s_types.Array, name, default=None
)
if edb_base is None:
edb_base = self.edb_schema.get_global(
s_types.Tuple, name, default=None
)
if edb_base is None:
raise AssertionError(f"unresolved type: {module}::{name}")
kwargs["edb_base"] = edb_base
# check if the type already exists
fkey = (name, dummy)
gqltype = self._type_map.get(fkey)
if not gqltype:
_type = GQLTypeMeta.edb_map.get(name, GQLShadowType)
gqltype = _type(schema=self, **kwargs)
self._type_map[fkey] = gqltype
return gqltype
|
def get(self, name: str, *, dummy: bool = False) -> GQLBaseType:
"""Get a special GQL type either by name or based on EdgeDB type."""
# normalize name and possibly add 'edb_base' to kwargs
edb_base = None
kwargs: Dict[str, Any] = {"dummy": dummy}
if not name.startswith("stdgraphql::"):
if edb_base is None:
type_id = s_types.type_id_from_name(s_name.name_from_string(name))
if type_id is not None:
edb_base = self.edb_schema.get_by_id(
type_id,
type=s_types.Type,
)
elif "::" in name:
edb_base = self.edb_schema.get(
name,
type=s_types.Type,
)
else:
for module in self.modules:
edb_base = self.edb_schema.get(
f"{module}::{name}",
type=s_types.Type,
default=None,
)
if edb_base:
break
if edb_base is None:
raise AssertionError(f"unresolved type: {module}::{name}")
kwargs["edb_base"] = edb_base
# check if the type already exists
fkey = (name, dummy)
gqltype = self._type_map.get(fkey)
if not gqltype:
_type = GQLTypeMeta.edb_map.get(name, GQLShadowType)
gqltype = _type(schema=self, **kwargs)
self._type_map[fkey] = gqltype
return gqltype
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _alter_pointer_type(self, pointer, schema, orig_schema, context):
old_ptr_stor_info = types.get_pointer_storage_info(pointer, schema=orig_schema)
new_target = pointer.get_target(schema)
ptr_table = old_ptr_stor_info.table_type == "link"
is_link = isinstance(pointer, s_links.Link)
is_lprop = pointer.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = pointer.get_required(schema)
changing_col_type = not is_link
if is_multi:
if isinstance(self, sd.AlterObjectFragment):
source_op = self.get_parent_op(context)
else:
source_op = self
else:
source_ctx = self.get_referrer_context_or_die(context)
source_op = source_ctx.op
# Ignore type narrowing resulting from a creation of a subtype
# as there isn't any data in the link yet.
if is_link and isinstance(source_op, sd.CreateObject):
return
new_target = pointer.get_target(schema)
orig_target = pointer.get_target(orig_schema)
new_type = types.pg_type_from_object(schema, new_target, persistent_tuples=True)
source = source_op.scls
using_eql_expr = self.cast_expr
# For links, when the new type is a supertype of the old, no
# SQL-level changes are necessary, unless an explicit conversion
# expression was specified.
if (
is_link
and using_eql_expr is None
and orig_target.issubclass(orig_schema, new_target)
):
return
if using_eql_expr is None and not is_link:
# A lack of an explicit EdgeQL conversion expression means
# that the new type is assignment-castable from the old type
# in the EdgeDB schema. BUT, it would not necessarily be
# assignment-castable in Postgres, especially if the types are
# compound. Thus, generate an explicit cast expression.
pname = pointer.get_shortname(schema).name
using_eql_expr = s_expr.Expression.from_ast(
ql_ast.TypeCast(
expr=ql_ast.Path(
partial=True,
steps=[
ql_ast.Ptr(
ptr=ql_ast.ObjectRef(name=pname),
type="property" if is_lprop else None,
),
],
),
type=s_utils.typeref_to_ast(schema, new_target),
),
schema=orig_schema,
)
# There are two major possibilities about the USING claus:
# 1) trivial case, where the USING clause refers only to the
# columns of the source table, in which case we simply compile that
# into an equivalent SQL USING clause, and 2) complex case, which
# supports arbitrary queries, but requires a temporary column,
# which is populated with the transition query and then used as the
# source for the SQL USING clause.
using_eql_expr, using_sql_expr, orig_rel_alias, sql_expr_is_trivial = (
self._compile_conversion_expr(
pointer=pointer,
conv_expr=using_eql_expr,
schema=schema,
orig_schema=orig_schema,
context=context,
)
)
expr_is_nullable = using_eql_expr.cardinality.can_be_zero()
need_temp_col = (is_multi and expr_is_nullable) or (
changing_col_type and not sql_expr_is_trivial
)
if changing_col_type:
self.pgops.add(
source_op.drop_inhview(
schema,
context,
source,
drop_ancestors=True,
)
)
tab = q(*old_ptr_stor_info.table_name)
target_col = old_ptr_stor_info.column_name
aux_ptr_table = None
aux_ptr_col = None
if is_link:
old_lb_ptr_stor_info = types.get_pointer_storage_info(
pointer, link_bias=True, schema=orig_schema
)
if (
old_lb_ptr_stor_info is not None
and old_lb_ptr_stor_info.table_type == "link"
):
aux_ptr_table = old_lb_ptr_stor_info.table_name
aux_ptr_col = old_lb_ptr_stor_info.column_name
if not sql_expr_is_trivial:
if need_temp_col:
alter_table = source_op.get_alter_table(
schema, context, priority=0, force_new=True, manual=True
)
temp_column = dbops.Column(
name=f"??{pointer.id}_{common.get_unique_random_name()}",
type=qt(new_type),
)
alter_table.add_operation(dbops.AlterTableAddColumn(temp_column))
self.pgops.add(alter_table)
target_col = temp_column.name
if is_multi:
obj_id_ref = f"{qi(orig_rel_alias)}.source"
else:
obj_id_ref = f"{qi(orig_rel_alias)}.id"
if is_required and not is_multi:
using_sql_expr = textwrap.dedent(f"""\
edgedb.raise_on_null(
({using_sql_expr}),
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || {obj_id_ref} || '"}}',
"column" => {ql(str(pointer.id))}
)
""")
update_qry = textwrap.dedent(f"""\
UPDATE {tab} AS {qi(orig_rel_alias)}
SET {qi(target_col)} = ({using_sql_expr})
""")
self.pgops.add(dbops.Query(update_qry))
actual_using_expr = qi(target_col)
else:
actual_using_expr = using_sql_expr
if changing_col_type or need_temp_col:
alter_table = source_op.get_alter_table(
schema, context, priority=0, force_new=True, manual=True
)
if is_multi:
# Remove all rows where the conversion expression produced NULLs.
col = qi(target_col)
if pointer.get_required(schema):
clean_nulls = dbops.Query(
textwrap.dedent(f"""\
WITH d AS (
DELETE FROM {tab} WHERE {col} IS NULL RETURNING source
)
SELECT
edgedb.raise(
NULL::text,
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || l.source || '"}}',
"column" => {ql(str(pointer.id))}
)
FROM
{tab} AS l
WHERE
l.source IN (SELECT source FROM d)
AND True = ALL (
SELECT {col} IS NULL
FROM {tab} AS l2
WHERE l2.source = l.source
)
LIMIT
1
INTO _dummy_text;
""")
)
else:
clean_nulls = dbops.Query(
textwrap.dedent(f"""\
DELETE FROM {tab} WHERE {col} IS NULL
""")
)
self.pgops.add(clean_nulls)
elif aux_ptr_table is not None:
# SINGLE links with link properties are represented in
# _two_ tables (the host type table and a link table with
# properties), and we must update both.
actual_col = qi(old_ptr_stor_info.column_name)
if expr_is_nullable and not is_required:
cleanup_qry = textwrap.dedent(f"""\
DELETE FROM {q(*aux_ptr_table)} AS aux
USING {tab} AS main
WHERE
main.id = aux.source
AND {actual_col} IS NULL
""")
self.pgops.add(dbops.Query(cleanup_qry))
update_qry = textwrap.dedent(f"""\
UPDATE {q(*aux_ptr_table)} AS aux
SET {qi(aux_ptr_col)} = main.{actual_col}
FROM {tab} AS main
WHERE
main.id = aux.source
""")
self.pgops.add(dbops.Query(update_qry))
if changing_col_type:
alter_type = dbops.AlterTableAlterColumnType(
old_ptr_stor_info.column_name,
common.quote_type(new_type),
using_expr=actual_using_expr,
)
alter_table.add_operation(alter_type)
elif need_temp_col:
move_data = dbops.Query(
textwrap.dedent(f"""\
UPDATE
{q(*old_ptr_stor_info.table_name)} AS {qi(orig_rel_alias)}
SET
{qi(old_ptr_stor_info.column_name)} = ({qi(target_col)})
""")
)
self.pgops.add(move_data)
if need_temp_col:
alter_table.add_operation(dbops.AlterTableDropColumn(temp_column))
if changing_col_type or need_temp_col:
self.pgops.add(alter_table)
if changing_col_type:
self.schedule_inhviews_update(
schema,
context,
source,
update_descendants=True,
update_ancestors=True,
)
|
def _alter_pointer_type(self, pointer, schema, orig_schema, context):
old_ptr_stor_info = types.get_pointer_storage_info(pointer, schema=orig_schema)
new_target = pointer.get_target(schema)
ptr_table = old_ptr_stor_info.table_type == "link"
is_link = isinstance(pointer, s_links.Link)
is_lprop = pointer.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = pointer.get_required(schema)
changing_col_type = not is_link
if is_multi:
if isinstance(self, sd.AlterObjectFragment):
source_op = self.get_parent_op(context)
else:
source_op = self
else:
source_ctx = self.get_referrer_context_or_die(context)
source_op = source_ctx.op
# Ignore type narrowing resulting from a creation of a subtype
# as there isn't any data in the link yet.
if is_link and isinstance(source_op, sd.CreateObject):
return
new_target = pointer.get_target(schema)
orig_target = pointer.get_target(orig_schema)
new_type = types.pg_type_from_object(schema, new_target, persistent_tuples=True)
source = source_op.scls
using_eql_expr = self.cast_expr
# For links, when the new type is a supertype of the old, no
# SQL-level changes are necessary, unless an explicit conversion
# expression was specified.
if (
is_link
and using_eql_expr is None
and orig_target.issubclass(schema, new_target)
):
return
if using_eql_expr is None and not is_link:
# A lack of an explicit EdgeQL conversion expression means
# that the new type is assignment-castable from the old type
# in the EdgeDB schema. BUT, it would not necessarily be
# assignment-castable in Postgres, especially if the types are
# compound. Thus, generate an explicit cast expression.
pname = pointer.get_shortname(schema).name
using_eql_expr = s_expr.Expression.from_ast(
ql_ast.TypeCast(
expr=ql_ast.Path(
partial=True,
steps=[
ql_ast.Ptr(
ptr=ql_ast.ObjectRef(name=pname),
type="property" if is_lprop else None,
),
],
),
type=s_utils.typeref_to_ast(schema, new_target),
),
schema=orig_schema,
)
# There are two major possibilities about the USING claus:
# 1) trivial case, where the USING clause refers only to the
# columns of the source table, in which case we simply compile that
# into an equivalent SQL USING clause, and 2) complex case, which
# supports arbitrary queries, but requires a temporary column,
# which is populated with the transition query and then used as the
# source for the SQL USING clause.
using_eql_expr, using_sql_expr, orig_rel_alias, sql_expr_is_trivial = (
self._compile_conversion_expr(
pointer=pointer,
conv_expr=using_eql_expr,
schema=schema,
orig_schema=orig_schema,
context=context,
)
)
expr_is_nullable = using_eql_expr.cardinality.can_be_zero()
need_temp_col = (is_multi and expr_is_nullable) or (
changing_col_type and not sql_expr_is_trivial
)
if changing_col_type:
self.pgops.add(
source_op.drop_inhview(
schema,
context,
source,
drop_ancestors=True,
)
)
tab = q(*old_ptr_stor_info.table_name)
target_col = old_ptr_stor_info.column_name
aux_ptr_table = None
aux_ptr_col = None
if is_link:
old_lb_ptr_stor_info = types.get_pointer_storage_info(
pointer, link_bias=True, schema=orig_schema
)
if (
old_lb_ptr_stor_info is not None
and old_lb_ptr_stor_info.table_type == "link"
):
aux_ptr_table = old_lb_ptr_stor_info.table_name
aux_ptr_col = old_lb_ptr_stor_info.column_name
if not sql_expr_is_trivial:
if need_temp_col:
alter_table = source_op.get_alter_table(
schema, context, priority=0, force_new=True, manual=True
)
temp_column = dbops.Column(
name=f"??{pointer.id}_{common.get_unique_random_name()}",
type=qt(new_type),
)
alter_table.add_operation(dbops.AlterTableAddColumn(temp_column))
self.pgops.add(alter_table)
target_col = temp_column.name
if is_multi:
obj_id_ref = f"{qi(orig_rel_alias)}.source"
else:
obj_id_ref = f"{qi(orig_rel_alias)}.id"
if is_required and not is_multi:
using_sql_expr = textwrap.dedent(f"""\
edgedb.raise_on_null(
({using_sql_expr}),
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || {obj_id_ref} || '"}}',
"column" => {ql(str(pointer.id))}
)
""")
update_qry = textwrap.dedent(f"""\
UPDATE {tab} AS {qi(orig_rel_alias)}
SET {qi(target_col)} = ({using_sql_expr})
""")
self.pgops.add(dbops.Query(update_qry))
actual_using_expr = qi(target_col)
else:
actual_using_expr = using_sql_expr
if changing_col_type or need_temp_col:
alter_table = source_op.get_alter_table(
schema, context, priority=0, force_new=True, manual=True
)
if is_multi:
# Remove all rows where the conversion expression produced NULLs.
col = qi(target_col)
if pointer.get_required(schema):
clean_nulls = dbops.Query(
textwrap.dedent(f"""\
WITH d AS (
DELETE FROM {tab} WHERE {col} IS NULL RETURNING source
)
SELECT
edgedb.raise(
NULL::text,
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || l.source || '"}}',
"column" => {ql(str(pointer.id))}
)
FROM
{tab} AS l
WHERE
l.source IN (SELECT source FROM d)
AND True = ALL (
SELECT {col} IS NULL
FROM {tab} AS l2
WHERE l2.source = l.source
)
LIMIT
1
INTO _dummy_text;
""")
)
else:
clean_nulls = dbops.Query(
textwrap.dedent(f"""\
DELETE FROM {tab} WHERE {col} IS NULL
""")
)
self.pgops.add(clean_nulls)
elif aux_ptr_table is not None:
# SINGLE links with link properties are represented in
# _two_ tables (the host type table and a link table with
# properties), and we must update both.
actual_col = qi(old_ptr_stor_info.column_name)
if expr_is_nullable and not is_required:
cleanup_qry = textwrap.dedent(f"""\
DELETE FROM {q(*aux_ptr_table)} AS aux
USING {tab} AS main
WHERE
main.id = aux.source
AND {actual_col} IS NULL
""")
self.pgops.add(dbops.Query(cleanup_qry))
update_qry = textwrap.dedent(f"""\
UPDATE {q(*aux_ptr_table)} AS aux
SET {qi(aux_ptr_col)} = main.{actual_col}
FROM {tab} AS main
WHERE
main.id = aux.source
""")
self.pgops.add(dbops.Query(update_qry))
if changing_col_type:
alter_type = dbops.AlterTableAlterColumnType(
old_ptr_stor_info.column_name,
common.quote_type(new_type),
using_expr=actual_using_expr,
)
alter_table.add_operation(alter_type)
elif need_temp_col:
move_data = dbops.Query(
textwrap.dedent(f"""\
UPDATE
{q(*old_ptr_stor_info.table_name)} AS {qi(orig_rel_alias)}
SET
{qi(old_ptr_stor_info.column_name)} = ({qi(target_col)})
""")
)
self.pgops.add(move_data)
if need_temp_col:
alter_table.add_operation(dbops.AlterTableDropColumn(temp_column))
if changing_col_type or need_temp_col:
self.pgops.add(alter_table)
if changing_col_type:
self.schedule_inhviews_update(
schema,
context,
source,
update_descendants=True,
update_ancestors=True,
)
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _canonicalize(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
) -> None:
super()._canonicalize(schema, context, scls)
assert isinstance(scls, Annotation)
# AnnotationValues have names derived from the abstract
# annotations. We unfortunately need to go update their names.
annot_vals = cast(
AbstractSet[AnnotationValue],
schema.get_referrers(scls, scls_type=AnnotationValue, field_name="annotation"),
)
for ref in annot_vals:
ref_name = ref.get_name(schema)
quals = list(sn.quals_from_fullname(ref_name))
new_ref_name = sn.QualName(
name=sn.get_specialized_name(self.new_name, *quals),
module=ref_name.module,
)
self.add(
self.init_rename_branch(
ref,
new_ref_name,
schema=schema,
context=context,
)
)
|
def _canonicalize(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
) -> List[sd.Command]:
assert isinstance(scls, Annotation)
commands = list(super()._canonicalize(schema, context, scls))
# AnnotationValues have names derived from the abstract
# annotations. We unfortunately need to go update their names.
annot_vals = cast(
AbstractSet[AnnotationValue],
schema.get_referrers(scls, scls_type=AnnotationValue, field_name="annotation"),
)
for ref in annot_vals:
ref_name = ref.get_name(schema)
quals = list(sn.quals_from_fullname(ref_name))
new_ref_name = sn.QualName(
name=sn.get_specialized_name(self.new_name, *quals),
module=ref_name.module,
)
commands.append(
self._canonicalize_ref_rename(
ref, ref_name, new_ref_name, schema, context, scls
)
)
return commands
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def populate_ddl_identity(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().populate_ddl_identity(schema, context)
if not isinstance(self, sd.CreateObject):
anno = self.scls.get_annotation(schema)
else:
annoname = sn.shortname_from_fullname(self.classname)
anno = schema.get(annoname, type=Annotation)
self.set_ddl_identity("annotation", anno)
return schema
|
def populate_ddl_identity(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().populate_ddl_identity(schema, context)
annoname = sn.shortname_from_fullname(self.classname)
anno = schema.get(annoname, type=Annotation)
self.set_ddl_identity("annotation", anno)
return schema
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def as_delete_delta(
self,
*,
schema: s_schema.Schema,
context: so.ComparisonContext,
) -> sd.ObjectCommand[Constraint]:
return super().as_delete_delta(schema=schema, context=context)
|
def as_delete_delta(
self,
*,
schema: s_schema.Schema,
context: so.ComparisonContext,
) -> sd.ObjectCommand[Constraint]:
return super().as_delete_delta(
schema=schema,
context=context,
)
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _canonicalize(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
) -> None:
super()._canonicalize(schema, context, scls)
assert isinstance(scls, Constraint)
# Don't do anything for concrete constraints
if not scls.get_abstract(schema):
return
# Concrete constraints are children of abstract constraints
# and have names derived from the abstract constraints. We
# unfortunately need to go update their names.
children = scls.children(schema)
for ref in children:
if ref.get_abstract(schema):
continue
ref_name = ref.get_name(schema)
quals = list(sn.quals_from_fullname(ref_name))
new_ref_name = sn.QualName(
name=sn.get_specialized_name(self.new_name, *quals),
module=ref_name.module,
)
self.add(
self.init_rename_branch(
ref,
new_ref_name,
schema=schema,
context=context,
)
)
return
|
def _canonicalize(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
) -> List[sd.Command]:
assert isinstance(scls, Constraint)
commands = list(super()._canonicalize(schema, context, scls))
# Don't do anything for concrete constraints
if not scls.get_abstract(schema):
return commands
# Concrete constraints are children of abstract constraints
# and have names derived from the abstract constraints. We
# unfortunately need to go update their names.
children = scls.children(schema)
for ref in children:
if ref.get_abstract(schema):
continue
ref_name = ref.get_name(schema)
quals = list(sn.quals_from_fullname(ref_name))
new_ref_name = sn.QualName(
name=sn.get_specialized_name(self.new_name, *quals),
module=ref_name.module,
)
commands.append(
self._canonicalize_ref_rename(
ref, ref_name, new_ref_name, schema, context, scls
)
)
return commands
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _canonicalize(
self,
schema: s_schema.Schema,
context: CommandContext,
scls: so.Object,
) -> None:
mcls = self.get_schema_metaclass()
for refdict in mcls.get_refdicts():
all_refs = set(scls.get_field_value(schema, refdict.attr).objects(schema))
ref: so.Object
for ref in all_refs:
ref_name = ref.get_name(schema)
quals = list(sn.quals_from_fullname(ref_name))
assert isinstance(self.new_name, sn.QualName)
quals[0] = str(self.new_name)
shortname = sn.shortname_from_fullname(ref_name)
new_ref_name = sn.QualName(
name=sn.get_specialized_name(shortname, *quals),
module=self.new_name.module,
)
self.add(
self.init_rename_branch(
ref,
new_ref_name,
schema=schema,
context=context,
)
)
|
def _canonicalize(
self,
schema: s_schema.Schema,
context: CommandContext,
scls: so.Object,
) -> Sequence[Command]:
mcls = self.get_schema_metaclass()
commands = []
for refdict in mcls.get_refdicts():
all_refs = set(scls.get_field_value(schema, refdict.attr).objects(schema))
ref: so.Object
for ref in all_refs:
ref_name = ref.get_name(schema)
quals = list(sn.quals_from_fullname(ref_name))
assert isinstance(self.new_name, sn.QualName)
quals[0] = str(self.new_name)
shortname = sn.shortname_from_fullname(ref_name)
new_ref_name = sn.QualName(
name=sn.get_specialized_name(shortname, *quals),
module=self.new_name.module,
)
commands.append(
self._canonicalize_ref_rename(
ref, ref_name, new_ref_name, schema, context, scls
)
)
# Record the fact that RenameObject._canonicalize
# was called on this object to guard against possible
# duplicate calls.
context.store_value(("renamecanon", self), True)
return commands
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
if self.if_exists:
scls = self.get_object(schema, context, default=None)
if scls is None:
context.current().op.discard(self)
return schema
else:
scls = self.get_object(schema, context)
self.scls = scls
with self.new_context(schema, context, scls):
if (
not self.canonical
and self.if_unused
and self._has_outside_references(schema, context)
):
parent_ctx = context.parent()
if parent_ctx is not None:
parent_ctx.op.discard(self)
return schema
schema = self._delete_begin(schema, context)
schema = self._delete_innards(schema, context)
schema = self._delete_finalize(schema, context)
return schema
|
def apply(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
if self.if_exists:
scls = self.get_object(schema, context, default=None)
if scls is None:
context.current().op.discard(self)
return schema
else:
scls = self.get_object(schema, context)
self.scls = scls
with self.new_context(schema, context, scls):
if (
not self.canonical
and self.if_unused
and (schema.get_referrers(scls) - self.expiring_refs)
):
parent_ctx = context.parent()
if parent_ctx is not None:
parent_ctx.op.discard(self)
return schema
schema = self._delete_begin(schema, context)
schema = self._delete_innards(schema, context)
schema = self._delete_finalize(schema, context)
return schema
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def validate_alter(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> None:
self._validate_legal_command(schema, context)
|
def validate_alter(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> None:
pass
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _alter_begin(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
scls = self.scls
context.renames[self.classname] = self.new_name
context.renamed_objs.add(scls)
vn = scls.get_verbosename(schema)
schema = self._propagate_if_expr_refs(
schema,
context,
action=f"rename {vn}",
fixer=self._fix_referencing_expr,
)
if not context.canonical:
self.set_attribute_value(
"name",
value=self.new_name,
orig_value=self.classname,
)
return super()._alter_begin(schema, context)
|
def _alter_begin(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
self._validate_legal_command(schema, context)
for op in self.get_prerequisites():
schema = op.apply(schema, context)
if not context.canonical:
schema = self.populate_ddl_identity(schema, context)
schema = self.canonicalize_attributes(schema, context)
computed_status = self._get_computed_status_of_fields(schema, context)
self._update_computed_fields(schema, context, computed_status)
self.validate_alter(schema, context)
props = self.get_resolved_attributes(schema, context)
schema = self.scls.update(schema, props)
return schema
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _alter_innards(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
if not context.canonical:
self._canonicalize(schema, context, self.scls)
return super()._alter_innards(schema, context)
|
def _alter_innards(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
for op in self.get_subcommands(include_prerequisites=False):
if not isinstance(op, AlterObjectProperty):
schema = op.apply(schema, context=context)
return schema
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _alter_finalize(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
return self._finalize_affected_refs(schema, context)
|
def _alter_finalize(
self,
schema: s_schema.Schema,
context: CommandContext,
) -> s_schema.Schema:
schema = self._finalize_affected_refs(schema, context)
return schema
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def get_expr_referrers(
schema: s_schema.Schema, obj: so.Object
) -> Dict[so.Object, List[str]]:
"""Return schema referrers with refs in expressions."""
refs: Dict[Tuple[Type[so.Object], str], FrozenSet[so.Object]] = (
schema.get_referrers_ex(obj)
)
result: Dict[so.Object, List[str]] = {}
for (mcls, fn), referrers in refs.items():
field = mcls.get_field(fn)
if issubclass(field.type, (Expression, ExpressionList)):
for ref in referrers:
result.setdefault(ref, []).append(fn)
return result
|
def get_expr_referrers(
schema: s_schema.Schema, obj: so.Object
) -> Dict[so.Object, List[str]]:
"""Return schema referrers with refs in expressions."""
refs = schema.get_referrers_ex(obj)
result: Dict[so.Object, List[str]] = {}
for (mcls, fn), referrers in refs.items():
field = mcls.get_field(fn)
if issubclass(field.type, (Expression, ExpressionList)):
for ref in referrers:
result.setdefault(ref, []).append(fn)
return result
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _handle_alias_op(
self,
expr: s_expr.Expression,
classname: sn.QualName,
schema: s_schema.Schema,
context: sd.CommandContext,
is_alter: bool = False,
) -> Tuple[sd.Command, sd.ObjectCommand[Alias]]:
from . import ordering as s_ordering
ir = self._compile_alias_expr(expr.qlast, classname, schema, context)
new_schema = ir.schema
expr = s_expr.Expression.from_ir(expr, ir, schema=schema)
coll_expr_aliases: List[s_types.Collection] = []
prev_coll_expr_aliases: List[s_types.Collection] = []
expr_aliases: List[s_types.Type] = []
prev_expr_aliases: List[s_types.Type] = []
prev_ir: Optional[irast.Statement] = None
old_schema: Optional[s_schema.Schema] = None
for vt in ir.views.values():
if isinstance(vt, s_types.Collection):
coll_expr_aliases.append(vt)
else:
new_schema = vt.set_field_value(new_schema, "alias_is_persistent", True)
expr_aliases.append(vt)
if is_alter:
prev = cast(s_types.Type, schema.get(classname))
prev_expr = prev.get_expr(schema)
assert prev_expr is not None
prev_ir = self._compile_alias_expr(prev_expr.qlast, classname, schema, context)
old_schema = prev_ir.schema
for vt in prev_ir.views.values():
if isinstance(vt, s_types.Collection):
prev_coll_expr_aliases.append(vt)
else:
prev_expr_aliases.append(vt)
derived_delta = sd.DeltaRoot()
for ref in ir.new_coll_types:
colltype_shell = ref.as_shell(new_schema)
# not "new_schema", because that already contains this
# collection type.
derived_delta.add(colltype_shell.as_create_delta(schema))
if is_alter:
assert old_schema is not None
derived_delta.add(
sd.delta_objects(
prev_expr_aliases,
expr_aliases,
sclass=s_types.Type,
old_schema=old_schema,
new_schema=new_schema,
context=so.ComparisonContext(),
)
)
else:
for expr_alias in expr_aliases:
derived_delta.add(
expr_alias.as_create_delta(
schema=new_schema,
context=so.ComparisonContext(),
)
)
if prev_ir is not None:
assert old_schema
for vt in prev_coll_expr_aliases:
dt = vt.as_colltype_delete_delta(
old_schema,
expiring_refs={self.scls},
view_name=classname,
)
derived_delta.prepend(dt)
for vt in prev_ir.new_coll_types:
dt = vt.as_colltype_delete_delta(
old_schema,
expiring_refs={self.scls},
if_exists=True,
)
derived_delta.prepend(dt)
for vt in coll_expr_aliases:
new_schema = vt.set_field_value(new_schema, "expr", expr)
new_schema = vt.set_field_value(new_schema, "alias_is_persistent", True)
ct = vt.as_shell(new_schema).as_create_delta(
# not "new_schema", to ensure the nested collection types
# are picked up properly.
schema,
view_name=classname,
attrs={
"expr": expr,
"alias_is_persistent": True,
"expr_type": s_types.ExprType.Select,
},
)
derived_delta.add(ct)
derived_delta = s_ordering.linearize_delta(
derived_delta, old_schema=schema, new_schema=new_schema
)
real_cmd: Optional[sd.ObjectCommand[Alias]] = None
for op in derived_delta.get_subcommands():
assert isinstance(op, sd.ObjectCommand)
if op.classname == classname and not isinstance(op, sd.DeleteObject):
real_cmd = op
break
if real_cmd is None:
assert is_alter
for expr_alias in expr_aliases:
if expr_alias.get_name(new_schema) == classname:
real_cmd = expr_alias.init_delta_command(
new_schema,
sd.AlterObject,
)
derived_delta.add(real_cmd)
break
else:
raise RuntimeError(
"view delta does not contain the expected view Create/Alter command"
)
real_cmd.set_attribute_value("expr", expr)
result = sd.CommandGroup()
result.update(derived_delta.get_subcommands())
return result, real_cmd
|
def _handle_alias_op(
self,
expr: s_expr.Expression,
classname: sn.QualName,
schema: s_schema.Schema,
context: sd.CommandContext,
is_alter: bool = False,
) -> Tuple[sd.Command, sd.ObjectCommand[Alias]]:
from . import ordering as s_ordering
ir = self._compile_alias_expr(expr.qlast, classname, schema, context)
new_schema = ir.schema
expr = s_expr.Expression.from_ir(expr, ir, schema=schema)
coll_expr_aliases: List[s_types.Collection] = []
prev_coll_expr_aliases: List[s_types.Collection] = []
expr_aliases: List[s_types.Type] = []
prev_expr_aliases: List[s_types.Type] = []
prev_ir: Optional[irast.Statement] = None
old_schema: Optional[s_schema.Schema] = None
for vt in ir.views.values():
if isinstance(vt, s_types.Collection):
coll_expr_aliases.append(vt)
else:
new_schema = vt.set_field_value(new_schema, "alias_is_persistent", True)
expr_aliases.append(vt)
if is_alter:
prev = cast(s_types.Type, schema.get(classname))
prev_expr = prev.get_expr(schema)
assert prev_expr is not None
prev_ir = self._compile_alias_expr(prev_expr.qlast, classname, schema, context)
old_schema = prev_ir.schema
for vt in prev_ir.views.values():
if isinstance(vt, s_types.Collection):
prev_coll_expr_aliases.append(vt)
else:
prev_expr_aliases.append(vt)
derived_delta = sd.DeltaRoot()
for ref in ir.new_coll_types:
colltype_shell = ref.as_shell(new_schema)
# not "new_schema", because that already contains this
# collection type.
derived_delta.add(colltype_shell.as_create_delta(schema))
if is_alter:
assert old_schema is not None
derived_delta.add(
sd.delta_objects(
prev_expr_aliases,
expr_aliases,
sclass=s_types.Type,
old_schema=old_schema,
new_schema=new_schema,
context=so.ComparisonContext(),
)
)
else:
for expr_alias in expr_aliases:
derived_delta.add(
expr_alias.as_create_delta(
schema=new_schema,
context=so.ComparisonContext(),
)
)
if prev_ir is not None:
assert old_schema
for vt in prev_coll_expr_aliases:
dt = vt.as_colltype_delete_delta(
old_schema,
expiring_refs={self.scls},
view_name=classname,
)
derived_delta.prepend(dt)
for vt in prev_ir.new_coll_types:
dt = vt.as_colltype_delete_delta(
old_schema,
expiring_refs={self.scls},
if_exists=True,
)
derived_delta.prepend(dt)
for vt in coll_expr_aliases:
new_schema = vt.set_field_value(new_schema, "expr", expr)
ct = vt.as_shell(new_schema).as_create_delta(
# not "new_schema", to ensure the nested collection types
# are picked up properly.
schema,
view_name=classname,
attrs={
"expr": expr,
"alias_is_persistent": True,
"expr_type": s_types.ExprType.Select,
},
)
new_schema = ct.apply(new_schema, context)
derived_delta.add(ct)
derived_delta = s_ordering.linearize_delta(
derived_delta, old_schema=schema, new_schema=new_schema
)
real_cmd: Optional[sd.ObjectCommand[Alias]] = None
for op in derived_delta.get_subcommands():
assert isinstance(op, sd.ObjectCommand)
if op.classname == classname and not isinstance(op, sd.DeleteObject):
real_cmd = op
break
if real_cmd is None:
assert is_alter
for expr_alias in expr_aliases:
if expr_alias.get_name(new_schema) == classname:
real_cmd = expr_alias.init_delta_command(
new_schema,
sd.AlterObject,
)
derived_delta.add(real_cmd)
break
else:
raise RuntimeError(
"view delta does not contain the expected view Create/Alter command"
)
real_cmd.set_attribute_value("expr", expr)
result = sd.CommandGroup()
result.update(derived_delta.get_subcommands())
return result, real_cmd
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _canonicalize(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
) -> None:
super()._canonicalize(schema, context, scls)
assert isinstance(scls, CallableObject)
# Don't do anything for concrete constraints
if not isinstance(scls, Function) and not scls.get_abstract(schema):
return
# params don't get picked up by the base _canonicalize because
# they aren't RefDicts (and use a different mangling scheme to
# boot), so we need to do it ourselves.
param_list = scls.get_params(schema)
params = CallableCommand._get_param_desc_from_params_ast(
schema, context.modaliases, param_list.get_ast(schema)
)
assert isinstance(self.new_name, sn.QualName)
for dparam, oparam in zip(params, param_list.objects(schema)):
self.add(
self.init_rename_branch(
oparam,
dparam.get_fqname(schema, self.new_name),
schema=schema,
context=context,
)
)
|
def _canonicalize(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
) -> List[sd.Command]:
assert isinstance(scls, CallableObject)
commands = list(super()._canonicalize(schema, context, scls))
# Don't do anything for concrete constraints
if not isinstance(scls, Function) and not scls.get_abstract(schema):
return commands
# params don't get picked up by the base _canonicalize because
# they aren't RefDicts (and use a different mangling scheme to
# boot), so we need to do it ourselves.
param_list = scls.get_params(schema)
params = CallableCommand._get_param_desc_from_params_ast(
schema, context.modaliases, param_list.get_ast(schema)
)
assert isinstance(self.new_name, sn.QualName)
for dparam, oparam in zip(params, param_list.objects(schema)):
ref_name = oparam.get_name(schema)
new_ref_name = dparam.get_fqname(schema, self.new_name)
commands.append(
self._canonicalize_ref_rename(
oparam, ref_name, new_ref_name, schema, context, scls
)
)
return commands
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _prepare_id(
cls,
schema: s_schema.Schema,
data: Dict[str, Any],
) -> uuid.UUID:
name = data.get("name")
assert isinstance(name, (str, sn.Name))
try:
return get_known_type_id(name)
except errors.SchemaError:
return cls.generate_id(schema, data)
|
def _prepare_id(cls, id: Optional[uuid.UUID], data: Dict[str, Any]) -> uuid.UUID:
if id is not None:
return id
name = data.get("name")
assert isinstance(name, (str, sn.Name))
try:
return get_known_type_id(name)
except errors.SchemaError:
return uuidgen.uuid1mc()
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def create_in_schema(
cls: Type[Object_T],
schema: s_schema.Schema,
*,
id: Optional[uuid.UUID] = None,
**data: Any,
) -> Tuple[s_schema.Schema, Object_T]:
if not cls.is_schema_object:
raise TypeError(f"{cls.__name__} type cannot be created in schema")
if not data.get("name"):
raise RuntimeError(f"cannot create {cls} without a name")
all_fields = cls.get_schema_fields()
obj_data = [None] * len(all_fields)
for field_name, value in data.items():
field = cls.get_schema_field(field_name)
value = field.coerce_value(schema, value)
obj_data[field.index] = value
if id is None:
id = cls._prepare_id(schema, data)
scls = cls._create_from_id(id)
schema = schema.add(id, cls, tuple(obj_data))
return schema, scls
|
def create_in_schema(
cls: Type[Object_T],
schema: s_schema.Schema,
*,
id: Optional[uuid.UUID] = None,
**data: Any,
) -> Tuple[s_schema.Schema, Object_T]:
if not cls.is_schema_object:
raise TypeError(f"{cls.__name__} type cannot be created in schema")
if not data.get("name"):
raise RuntimeError(f"cannot create {cls} without a name")
all_fields = cls.get_schema_fields()
obj_data = [None] * len(all_fields)
for field_name, value in data.items():
field = cls.get_schema_field(field_name)
value = field.coerce_value(schema, value)
obj_data[field.index] = value
id = cls._prepare_id(id, data)
scls = cls._create_from_id(id)
schema = schema.add(id, cls, tuple(obj_data))
return schema, scls
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def init_delta_command(
self: Object_T,
schema: s_schema.Schema,
cmdtype: Type[sd.ObjectCommand_T],
*,
classname: Optional[sn.Name] = None,
**kwargs: Any,
) -> sd.ObjectCommand_T:
from . import delta as sd
cls = type(self)
cmd = sd.get_object_delta_command(
objtype=cls,
cmdtype=cmdtype,
schema=schema,
name=classname or self.get_name(schema),
ddl_identity=self.get_ddl_identity(schema),
**kwargs,
)
self.record_cmd_object_aux_data(schema, cmd)
return cmd
|
def init_delta_command(
self: Object_T,
schema: s_schema.Schema,
cmdtype: Type[sd.ObjectCommand_T],
*,
classname: Optional[sn.Name] = None,
**kwargs: Any,
) -> sd.ObjectCommand_T:
from . import delta as sd
cls = type(self)
cmd = sd.get_object_delta_command(
objtype=cls,
cmdtype=cmdtype,
schema=schema,
name=classname or self.get_name(schema),
ddl_identity=self.get_ddl_identity(schema),
**kwargs,
)
for field in cls.get_aux_cmd_data_fields():
cmd.set_object_aux_data(
field.name,
self.get_field_value(schema, field.name),
)
return cmd
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def as_create_delta(
self: Object_T,
schema: s_schema.Schema,
context: ComparisonContext,
) -> sd.ObjectCommand[Object_T]:
from . import delta as sd
cls = type(self)
delta = self.init_delta_command(
schema,
sd.CreateObject,
canonical=True,
)
if context.generate_prompts:
delta.set_annotation("orig_cmdclass", type(delta))
ff = cls.get_fields(sorted=True).items()
fields = {fn: f for fn, f in ff if f.simpledelta and not f.ephemeral}
for fn, f in fields.items():
value = self.get_explicit_field_value(schema, fn, None)
if (
value is None
and context.descriptive_mode
and (f.describe_visibility & DescribeVisibilityFlags.SHOW_IF_DERIVED)
):
value = self.get_field_value(schema, fn)
value_from_default = True
else:
value_from_default = False
if f.aux_cmd_data:
delta.set_object_aux_data(fn, value)
if value is not None:
v: Any
if issubclass(f.type, ObjectContainer):
v = value.as_shell(schema)
else:
v = value
self.record_field_create_delta(
schema,
delta,
context=context,
fname=fn,
value=v,
from_default=value_from_default,
)
for refdict in cls.get_refdicts():
refcoll: ObjectCollection[Object] = self.get_field_value(schema, refdict.attr)
sorted_refcoll = sorted(
refcoll.objects(schema),
key=lambda o: o.get_name(schema),
)
for ref in sorted_refcoll:
delta.add(ref.as_create_delta(schema, context))
return delta
|
def as_create_delta(
self: Object_T,
schema: s_schema.Schema,
context: ComparisonContext,
) -> sd.ObjectCommand[Object_T]:
from . import delta as sd
cls = type(self)
delta = self.init_delta_command(
schema,
sd.CreateObject,
canonical=True,
)
if context.generate_prompts:
delta.set_annotation("orig_cmdclass", type(delta))
# IDs are assigned once when the object is created and
# never changed.
id_value = self.get_explicit_field_value(schema, "id")
delta.set_attribute_value("id", id_value)
ff = cls.get_fields(sorted=True).items()
fields = {fn: f for fn, f in ff if f.simpledelta and not f.ephemeral}
for fn, f in fields.items():
value = self.get_explicit_field_value(schema, fn, None)
if (
value is None
and context.descriptive_mode
and (f.describe_visibility & DescribeVisibilityFlags.SHOW_IF_DERIVED)
):
value = self.get_field_value(schema, fn)
value_from_default = True
else:
value_from_default = False
if f.aux_cmd_data:
delta.set_object_aux_data(fn, value)
if value is not None:
v: Any
if issubclass(f.type, ObjectContainer):
v = value.as_shell(schema)
else:
v = value
self.record_field_create_delta(
schema,
delta,
context=context,
fname=fn,
value=v,
from_default=value_from_default,
)
for refdict in cls.get_refdicts():
refcoll: ObjectCollection[Object] = self.get_field_value(schema, refdict.attr)
sorted_refcoll = sorted(
refcoll.objects(schema),
key=lambda o: o.get_name(schema),
)
for ref in sorted_refcoll:
delta.add(ref.as_create_delta(schema, context))
return delta
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def compare_values(
cls,
ours: ObjectCollection[Object_T],
theirs: ObjectCollection[Object_T],
*,
our_schema: s_schema.Schema,
their_schema: s_schema.Schema,
context: ComparisonContext,
compcoef: float,
) -> float:
if ours is not None:
our_names = cls._container(
context.get_obj_name(our_schema, obj) for obj in ours.objects(our_schema)
)
else:
our_names = cls._container()
if theirs is not None:
their_names = theirs.names(their_schema)
else:
their_names = cls._container()
if our_names != their_names:
return compcoef
else:
return 1.0
|
def compare_values(
cls,
ours: ObjectCollection[Object_T],
theirs: ObjectCollection[Object_T],
*,
our_schema: s_schema.Schema,
their_schema: s_schema.Schema,
context: ComparisonContext,
compcoef: float,
) -> float:
if ours is not None:
our_names = tuple(
context.get_obj_name(our_schema, obj) for obj in ours.objects(our_schema)
)
else:
our_names = cls._container()
if theirs is not None:
their_names = theirs.names(their_schema)
else:
their_names = cls._container()
if our_names != their_names:
return compcoef
else:
return 1.0
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def get_or_create_union_type(
schema: s_schema.Schema,
components: Iterable[ObjectType],
*,
opaque: bool = False,
module: Optional[str] = None,
) -> Tuple[s_schema.Schema, ObjectType, bool]:
name = s_types.get_union_type_name(
(c.get_name(schema) for c in components),
opaque=opaque,
module=module,
)
objtype = schema.get(name, default=None, type=ObjectType)
created = objtype is None
if objtype is None:
components = list(components)
std_object = schema.get("std::BaseObject", type=ObjectType)
schema, objtype = std_object.derive_subtype(
schema,
name=name,
attrs=dict(
union_of=so.ObjectSet.create(schema, components),
is_opaque_union=opaque,
abstract=True,
final=True,
),
)
if not opaque:
schema = sources.populate_pointer_set_for_source_union(
schema,
cast(List[sources.Source], components),
objtype,
modname=module,
)
return schema, objtype, created
|
def get_or_create_union_type(
schema: s_schema.Schema,
components: Iterable[ObjectType],
*,
opaque: bool = False,
module: Optional[str] = None,
) -> Tuple[s_schema.Schema, ObjectType, bool]:
type_id, name = s_types.get_union_type_id(
schema,
components,
opaque=opaque,
module=module,
)
objtype = schema.get_by_id(type_id, None, type=ObjectType)
created = objtype is None
if objtype is None:
components = list(components)
std_object = schema.get("std::BaseObject", type=ObjectType)
schema, objtype = std_object.derive_subtype(
schema,
name=name,
attrs=dict(
id=type_id,
union_of=so.ObjectSet.create(schema, components),
is_opaque_union=opaque,
abstract=True,
final=True,
),
)
if not opaque:
schema = sources.populate_pointer_set_for_source_union(
schema,
cast(List[sources.Source], components),
objtype,
modname=module,
)
return schema, objtype, created
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def get_or_create_intersection_type(
schema: s_schema.Schema,
components: Iterable[ObjectType],
*,
module: Optional[str] = None,
) -> Tuple[s_schema.Schema, ObjectType, bool]:
name = s_types.get_intersection_type_name(
(c.get_name(schema) for c in components),
module=module,
)
objtype = schema.get(name, default=None, type=ObjectType)
created = objtype is None
if objtype is None:
components = list(components)
std_object = schema.get("std::BaseObject", type=ObjectType)
schema, objtype = std_object.derive_subtype(
schema,
name=name,
attrs=dict(
intersection_of=so.ObjectSet.create(schema, components),
abstract=True,
final=True,
),
)
ptrs_dict = collections.defaultdict(list)
for component in components:
for pn, ptr in component.get_pointers(schema).items(schema):
ptrs_dict[pn].append(ptr)
intersection_pointers = {}
for pn, ptrs in ptrs_dict.items():
if len(ptrs) > 1:
# The pointer is present in more than one component.
schema, ptr = pointers.get_or_create_intersection_pointer(
schema,
ptrname=pn,
source=objtype,
components=set(ptrs),
)
else:
ptr = ptrs[0]
intersection_pointers[pn] = ptr
for pn, ptr in intersection_pointers.items():
if objtype.getptr(schema, pn) is None:
schema = objtype.add_pointer(schema, ptr)
assert isinstance(objtype, ObjectType)
return schema, objtype, created
|
def get_or_create_intersection_type(
schema: s_schema.Schema,
components: Iterable[ObjectType],
*,
module: Optional[str] = None,
) -> Tuple[s_schema.Schema, ObjectType, bool]:
type_id, name = s_types.get_intersection_type_id(
schema,
components,
module=module,
)
objtype = schema.get_by_id(type_id, None)
created = objtype is None
if objtype is None:
components = list(components)
std_object = schema.get("std::BaseObject", type=ObjectType)
schema, objtype = std_object.derive_subtype(
schema,
name=name,
attrs=dict(
id=type_id,
intersection_of=so.ObjectSet.create(schema, components),
abstract=True,
final=True,
),
)
ptrs_dict = collections.defaultdict(list)
for component in components:
for pn, ptr in component.get_pointers(schema).items(schema):
ptrs_dict[pn].append(ptr)
intersection_pointers = {}
for pn, ptrs in ptrs_dict.items():
if len(ptrs) > 1:
# The pointer is present in more than one component.
schema, ptr = pointers.get_or_create_intersection_pointer(
schema,
ptrname=pn,
source=objtype,
components=set(ptrs),
)
else:
ptr = ptrs[0]
intersection_pointers[pn] = ptr
for pn, ptr in intersection_pointers.items():
if objtype.getptr(schema, pn) is None:
schema = objtype.add_pointer(schema, ptr)
assert isinstance(objtype, ObjectType)
return schema, objtype, created
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _trace_op(
op: sd.Command,
opbranch: List[sd.Command],
depgraph: DepGraph,
renames: Dict[sn.Name, sn.Name],
renames_r: Dict[sn.Name, sn.Name],
strongrefs: Dict[sn.Name, sn.Name],
old_schema: Optional[s_schema.Schema],
new_schema: s_schema.Schema,
) -> None:
def get_deps(key: DepGraphKey) -> DepGraphEntry:
try:
item = depgraph[key]
except KeyError:
item = depgraph[key] = DepGraphEntry(
item=(),
deps=ordered.OrderedSet(),
weak_deps=ordered.OrderedSet(),
)
return item
def record_field_deps(
op: sd.AlterObjectProperty,
parent_op: sd.ObjectCommand[so.Object],
) -> str:
if isinstance(op.new_value, (so.Object, so.ObjectShell)):
nvn = op.new_value.get_name(new_schema)
if nvn is not None:
deps.add(("create", str(nvn)))
deps.add(("alter", str(nvn)))
if nvn in renames_r:
deps.add(("rename", str(renames_r[nvn])))
graph_key = f"{parent_op.classname}%%{op.property}"
deps.add(("create", str(parent_op.classname)))
if isinstance(op.old_value, (so.Object, so.ObjectShell)):
assert old_schema is not None
ovn = op.old_value.get_name(old_schema)
nvn = op.new_value.get_name(new_schema)
if ovn != nvn:
ov_item = get_deps(("delete", str(ovn)))
ov_item.deps.add((tag, graph_key))
return graph_key
def write_dep_matrix(
dependent: str,
dependent_tags: Tuple[str, ...],
dependency: str,
dependency_tags: Tuple[str, ...],
*,
as_weak: bool = False,
) -> None:
for dependent_tag in dependent_tags:
item = get_deps((dependent_tag, dependent))
for dependency_tag in dependency_tags:
if as_weak:
item.weak_deps.add((dependency_tag, dependency))
else:
item.deps.add((dependency_tag, dependency))
deps: ordered.OrderedSet[Tuple[str, str]] = ordered.OrderedSet()
graph_key: str
implicit_ancestors: List[sn.Name] = []
if isinstance(op, sd.CreateObject):
tag = "create"
elif isinstance(op, sd.AlterObject):
tag = "alter"
elif isinstance(op, sd.RenameObject):
tag = "rename"
elif isinstance(op, inheriting.RebaseInheritingObject):
tag = "rebase"
elif isinstance(op, sd.DeleteObject):
tag = "delete"
elif isinstance(op, referencing.AlterOwned):
tag = "alterowned"
elif isinstance(op, (sd.AlterObjectProperty, sd.AlterSpecialObjectField)):
tag = "field"
else:
raise RuntimeError(f"unexpected delta command type at top level: {op!r}")
if isinstance(op, (sd.DeleteObject, referencing.AlterOwned)):
assert old_schema is not None
obj = get_object(old_schema, op)
refs = _get_referrers(old_schema, obj, strongrefs)
for ref in refs:
ref_name_str = str(ref.get_name(old_schema))
if (
isinstance(obj, referencing.ReferencedObject)
and obj.get_referrer(old_schema) == ref
):
# If the referrer is enclosing the object
# (i.e. the reference is a refdict reference),
# we sort the referrer operation first.
ref_item = get_deps(("delete", ref_name_str))
ref_item.deps.add((tag, str(op.classname)))
elif (
isinstance(ref, referencing.ReferencedInheritingObject)
and (
op.classname
in {
b.get_name(old_schema)
for b in ref.get_implicit_ancestors(old_schema)
}
)
and (
not isinstance(ref, s_pointers.Pointer)
or not ref.get_from_alias(old_schema)
)
):
# If the ref is an implicit descendant (i.e. an inherited ref),
# we also sort it _after_ the parent, because we'll pull
# it as a child of the parent op at the time of tree
# reassembly.
ref_item = get_deps(("delete", ref_name_str))
ref_item.deps.add((tag, str(op.classname)))
elif (
isinstance(ref, referencing.ReferencedObject)
and ref.get_referrer(old_schema) == obj
):
# Skip refdict.backref_attr to avoid dependency cycles.
continue
else:
# Otherwise, things must be deleted _after_ their referrers
# have been deleted or altered.
deps.add(("delete", ref_name_str))
deps.add(("rebase", ref_name_str))
if isinstance(obj, referencing.ReferencedObject):
referrer = obj.get_referrer(old_schema)
if referrer is not None:
assert isinstance(referrer, so.QualifiedObject)
referrer_name: sn.Name = referrer.get_name(old_schema)
if referrer_name in renames_r:
referrer_name = renames_r[referrer_name]
# For SET OWNED, we need any rebase of the enclosing
# object to come *after*, because otherwise obj could
# get dropped before the SET OWNED takes effect.
# For DROP OWNED and DROP we want it after the rebase.
is_set_owned = isinstance(
op, referencing.AlterOwned
) and op.get_attribute_value("owned")
if is_set_owned:
ref_item = get_deps(("rebase", str(referrer_name)))
ref_item.deps.add(("alterowned", str(op.classname)))
else:
deps.add(("rebase", str(referrer_name)))
if isinstance(obj, referencing.ReferencedInheritingObject) and (
not isinstance(obj, s_pointers.Pointer)
or not obj.get_from_alias(old_schema)
):
for ancestor in obj.get_implicit_ancestors(old_schema):
ancestor_name = ancestor.get_name(old_schema)
implicit_ancestors.append(ancestor_name)
anc_item = get_deps(("delete", str(ancestor_name)))
anc_item.deps.add(("alterowned", str(op.classname)))
if is_set_owned:
# SET OWNED must come before ancestor rebases too
anc_item = get_deps(("rebase", str(ancestor_name)))
anc_item.deps.add(("alterowned", str(op.classname)))
graph_key = str(op.classname)
elif isinstance(op, sd.AlterObjectProperty):
parent_op = opbranch[-2]
assert isinstance(parent_op, sd.ObjectCommand)
graph_key = record_field_deps(op, parent_op)
elif isinstance(op, sd.AlterSpecialObjectField):
parent_op = opbranch[-2]
assert isinstance(parent_op, sd.ObjectCommand)
field_op = op._get_attribute_set_cmd(op._field)
assert field_op is not None
graph_key = record_field_deps(field_op, parent_op)
elif isinstance(op, sd.ObjectCommand):
# If the object was renamed, use the new name, else use regular.
name = renames.get(op.classname, op.classname)
obj = get_object(new_schema, op, name)
this_name_str = str(op.classname)
if tag == "rename":
# On renames, we want to delete any references before we
# do the rename. This is because for functions and
# constraints we implicitly rename the object when
# something it references is renamed, and this implicit
# rename can interfere with a CREATE/DELETE pair. So we
# make sure to put the DELETE before the RENAME of a
# referenced object. (An improvement would be to elide a
# CREATE/DELETE pair when it could be implicitly handled
# by a rename).
assert old_schema
old_obj = get_object(old_schema, op, op.classname)
for ref in _get_referrers(old_schema, old_obj, strongrefs):
deps.add(("delete", str(ref.get_name(old_schema))))
refs = _get_referrers(new_schema, obj, strongrefs)
for ref in refs:
ref_name = ref.get_name(new_schema)
if ref_name in renames_r:
ref_name = renames_r[ref_name]
ref_name_str = str(ref_name)
if (
isinstance(ref, referencing.ReferencedObject)
and ref.get_referrer(new_schema) == obj
) or (
isinstance(obj, referencing.ReferencedObject)
and obj.get_referrer(new_schema) == ref
):
# Mostly ignore refs generated by refdict backref, but
# make create/alter depend on renames of the backref.
# This makes sure that a rename is done before the innards are
# modified. DDL doesn't actually require this but some of the
# internals for producing the DDL do (since otherwise we can
# generate references to the renamed type in our delta before
# it is renamed).
if tag in ("create", "alter"):
deps.add(("rename", ref_name_str))
continue
write_dep_matrix(
dependent=ref_name_str,
dependent_tags=("create", "alter", "rebase"),
dependency=this_name_str,
dependency_tags=("create", "alter", "rename"),
)
item = get_deps(("rename", ref_name_str))
item.deps.add(("create", this_name_str))
item.deps.add(("alter", this_name_str))
item.deps.add(("rename", this_name_str))
if isinstance(ref, s_pointers.Pointer):
# The current item is a type referred to by
# a link or property in another type. Set the referring
# type and its descendants as weak dependents of the current
# item to reduce the number of unnecessary ALTERs in the
# final delta, especially ones that might result in SET TYPE
# commands being generated.
ref_src = ref.get_source(new_schema)
if isinstance(ref_src, s_pointers.Pointer):
ref_src_src = ref_src.get_source(new_schema)
if ref_src_src is not None:
ref_src = ref_src_src
if ref_src is not None:
for desc in ref_src.descendants(new_schema) | {ref_src}:
desc_name = str(desc.get_name(new_schema))
write_dep_matrix(
dependent=desc_name,
dependent_tags=("create", "alter"),
dependency=this_name_str,
dependency_tags=("create", "alter", "rename"),
as_weak=True,
)
if tag in ("create", "alter"):
# In a delete/create cycle, deletion must obviously
# happen first.
deps.add(("delete", str(op.classname)))
if isinstance(obj, s_func.Function) and old_schema is not None:
old_funcs = old_schema.get_functions(
sn.shortname_from_fullname(op.classname),
default=(),
)
for old_func in old_funcs:
deps.add(("delete", str(old_func.get_name(old_schema))))
if tag == "alter":
# Alteration must happen after creation, if any.
deps.add(("create", this_name_str))
deps.add(("rename", this_name_str))
deps.add(("rebase", this_name_str))
if isinstance(obj, referencing.ReferencedObject):
referrer = obj.get_referrer(new_schema)
if referrer is not None:
assert isinstance(referrer, so.QualifiedObject)
referrer_name = referrer.get_name(new_schema)
if referrer_name in renames_r:
referrer_name = renames_r[referrer_name]
ref_name_str = str(referrer_name)
deps.add(("create", ref_name_str))
deps.add(("rebase", ref_name_str))
if isinstance(obj, referencing.ReferencedInheritingObject):
implicit_ancestors = [
b.get_name(new_schema)
for b in obj.get_implicit_ancestors(new_schema)
]
if not isinstance(op, sd.CreateObject):
assert old_schema is not None
name = renames_r.get(op.classname, op.classname)
old_obj = get_object(old_schema, op, name)
assert isinstance(
old_obj,
referencing.ReferencedInheritingObject,
)
implicit_ancestors += [
b.get_name(old_schema)
for b in old_obj.get_implicit_ancestors(old_schema)
]
graph_key = this_name_str
else:
raise AssertionError(f"unexpected op type: {op!r}")
item = get_deps((tag, graph_key))
item.item = tuple(opbranch)
item.deps |= deps
item.extra = DepGraphEntryExtra(
implicit_ancestors=[renames_r.get(a, a) for a in implicit_ancestors],
)
|
def _trace_op(
op: sd.Command,
opbranch: List[sd.Command],
depgraph: DepGraph,
renames: Dict[sn.Name, sn.Name],
renames_r: Dict[sn.Name, sn.Name],
strongrefs: Dict[sn.Name, sn.Name],
old_schema: Optional[s_schema.Schema],
new_schema: s_schema.Schema,
) -> None:
def get_deps(key: DepGraphKey) -> DepGraphEntry:
try:
item = depgraph[key]
except KeyError:
item = depgraph[key] = DepGraphEntry(
item=(),
deps=ordered.OrderedSet(),
weak_deps=ordered.OrderedSet(),
)
return item
def record_field_deps(
op: sd.AlterObjectProperty,
parent_op: sd.ObjectCommand[so.Object],
) -> str:
if isinstance(op.new_value, (so.Object, so.ObjectShell)):
nvn = op.new_value.get_name(new_schema)
if nvn is not None:
deps.add(("create", str(nvn)))
deps.add(("alter", str(nvn)))
if nvn in renames_r:
deps.add(("rename", str(renames_r[nvn])))
graph_key = f"{parent_op.classname}%%{op.property}"
deps.add(("create", str(parent_op.classname)))
if isinstance(op.old_value, (so.Object, so.ObjectShell)):
assert old_schema is not None
ovn = op.old_value.get_name(old_schema)
nvn = op.new_value.get_name(new_schema)
if ovn != nvn:
ov_item = get_deps(("delete", str(ovn)))
ov_item.deps.add((tag, graph_key))
return graph_key
def write_dep_matrix(
dependent: str,
dependent_tags: Tuple[str, ...],
dependency: str,
dependency_tags: Tuple[str, ...],
*,
as_weak: bool = False,
) -> None:
for dependent_tag in dependent_tags:
item = get_deps((dependent_tag, dependent))
for dependency_tag in dependency_tags:
if as_weak:
item.weak_deps.add((dependency_tag, dependency))
else:
item.deps.add((dependency_tag, dependency))
deps: ordered.OrderedSet[Tuple[str, str]] = ordered.OrderedSet()
graph_key: str
implicit_ancestors: List[sn.Name] = []
if isinstance(op, sd.CreateObject):
tag = "create"
elif isinstance(op, sd.AlterObject):
tag = "alter"
elif isinstance(op, sd.RenameObject):
tag = "rename"
elif isinstance(op, inheriting.RebaseInheritingObject):
tag = "rebase"
elif isinstance(op, sd.DeleteObject):
tag = "delete"
elif isinstance(op, referencing.AlterOwned):
tag = "alterowned"
elif isinstance(op, (sd.AlterObjectProperty, sd.AlterSpecialObjectField)):
tag = "field"
else:
raise RuntimeError(f"unexpected delta command type at top level: {op!r}")
if isinstance(op, (sd.DeleteObject, referencing.AlterOwned)):
assert old_schema is not None
obj = get_object(old_schema, op)
refs = _get_referrers(old_schema, obj, strongrefs)
for ref in refs:
ref_name_str = str(ref.get_name(old_schema))
if (
isinstance(obj, referencing.ReferencedObject)
and obj.get_referrer(old_schema) == ref
):
# If the referrer is enclosing the object
# (i.e. the reference is a refdict reference),
# we sort the referrer operation first.
ref_item = get_deps(("delete", ref_name_str))
ref_item.deps.add((tag, str(op.classname)))
elif (
isinstance(ref, referencing.ReferencedInheritingObject)
and (
op.classname
in {
b.get_name(old_schema)
for b in ref.get_implicit_ancestors(old_schema)
}
)
and (
not isinstance(ref, s_pointers.Pointer)
or not ref.get_from_alias(old_schema)
)
):
# If the ref is an implicit descendant (i.e. an inherited ref),
# we also sort it _after_ the parent, because we'll pull
# it as a child of the parent op at the time of tree
# reassembly.
ref_item = get_deps(("delete", ref_name_str))
ref_item.deps.add((tag, str(op.classname)))
elif (
isinstance(ref, referencing.ReferencedObject)
and ref.get_referrer(old_schema) == obj
):
# Skip refdict.backref_attr to avoid dependency cycles.
continue
else:
# Otherwise, things must be deleted _after_ their referrers
# have been deleted or altered.
deps.add(("delete", ref_name_str))
deps.add(("rebase", ref_name_str))
if isinstance(obj, referencing.ReferencedObject):
referrer = obj.get_referrer(old_schema)
if referrer is not None:
assert isinstance(referrer, so.QualifiedObject)
referrer_name: sn.Name = referrer.get_name(old_schema)
if referrer_name in renames_r:
referrer_name = renames_r[referrer_name]
# For SET OWNED, we need any rebase of the enclosing
# object to come *after*, because otherwise obj could
# get dropped before the SET OWNED takes effect.
# For DROP OWNED and DROP we want it after the rebase.
is_set_owned = isinstance(
op, referencing.AlterOwned
) and op.get_attribute_value("owned")
if is_set_owned:
ref_item = get_deps(("rebase", str(referrer_name)))
ref_item.deps.add(("alterowned", str(op.classname)))
else:
deps.add(("rebase", str(referrer_name)))
if isinstance(obj, referencing.ReferencedInheritingObject) and (
not isinstance(obj, s_pointers.Pointer)
or not obj.get_from_alias(old_schema)
):
for ancestor in obj.get_implicit_ancestors(old_schema):
ancestor_name = ancestor.get_name(old_schema)
implicit_ancestors.append(ancestor_name)
anc_item = get_deps(("delete", str(ancestor_name)))
anc_item.deps.add(("alterowned", str(op.classname)))
if is_set_owned:
# SET OWNED must come before ancestor rebases too
anc_item = get_deps(("rebase", str(ancestor_name)))
anc_item.deps.add(("alterowned", str(op.classname)))
graph_key = str(op.classname)
elif isinstance(op, sd.AlterObjectProperty):
parent_op = opbranch[-2]
assert isinstance(parent_op, sd.ObjectCommand)
graph_key = record_field_deps(op, parent_op)
elif isinstance(op, sd.AlterSpecialObjectField):
parent_op = opbranch[-2]
assert isinstance(parent_op, sd.ObjectCommand)
field_op = op._get_attribute_set_cmd(op._field)
assert field_op is not None
graph_key = record_field_deps(field_op, parent_op)
elif isinstance(op, sd.ObjectCommand):
# If the object was renamed, use the new name, else use regular.
name = renames.get(op.classname, op.classname)
obj = get_object(new_schema, op, name)
this_name_str = str(op.classname)
if tag == "rename":
# On renames, we want to delete any references before we
# do the rename. This is because for functions and
# constraints we implicitly rename the object when
# something it references is renamed, and this implicit
# rename can interfere with a CREATE/DELETE pair. So we
# make sure to put the DELETE before the RENAME of a
# referenced object. (An improvement would be to elide a
# CREATE/DELETE pair when it could be implicitly handled
# by a rename).
assert old_schema
old_obj = get_object(old_schema, op, op.classname)
for ref in _get_referrers(old_schema, old_obj, strongrefs):
deps.add(("delete", str(ref.get_name(old_schema))))
refs = _get_referrers(new_schema, obj, strongrefs)
for ref in refs:
ref_name = ref.get_name(new_schema)
if ref_name in renames_r:
ref_name = renames_r[ref_name]
ref_name_str = str(ref_name)
if (
isinstance(ref, referencing.ReferencedObject)
and ref.get_referrer(new_schema) == obj
) or (
isinstance(obj, referencing.ReferencedObject)
and obj.get_referrer(new_schema) == ref
):
# Mostly ignore refs generated by refdict backref, but
# make create/alter depend on renames of the backref.
# This makes sure that a rename is done before the innards are
# modified. DDL doesn't actually require this but some of the
# internals for producing the DDL do (since otherwise we can
# generate references to the renamed type in our delta before
# it is renamed).
if tag in ("create", "alter"):
deps.add(("rename", ref_name_str))
continue
write_dep_matrix(
dependent=ref_name_str,
dependent_tags=("create", "alter", "rebase"),
dependency=this_name_str,
dependency_tags=("create", "alter", "rename"),
)
item = get_deps(("rename", ref_name_str))
item.deps.add(("create", this_name_str))
item.deps.add(("alter", this_name_str))
if isinstance(ref, s_pointers.Pointer):
# The current item is a type referred to by
# a link or property in another type. Set the referring
# type and its descendants as weak dependents of the current
# item to reduce the number of unnecessary ALTERs in the
# final delta, especially ones that might result in SET TYPE
# commands being generated.
ref_src = ref.get_source(new_schema)
if isinstance(ref_src, s_pointers.Pointer):
ref_src_src = ref_src.get_source(new_schema)
if ref_src_src is not None:
ref_src = ref_src_src
if ref_src is not None:
for desc in ref_src.descendants(new_schema) | {ref_src}:
desc_name = str(desc.get_name(new_schema))
write_dep_matrix(
dependent=desc_name,
dependent_tags=("create", "alter"),
dependency=this_name_str,
dependency_tags=("create", "alter", "rename"),
as_weak=True,
)
if tag in ("create", "alter"):
# In a delete/create cycle, deletion must obviously
# happen first.
deps.add(("delete", str(op.classname)))
if isinstance(obj, s_func.Function) and old_schema is not None:
old_funcs = old_schema.get_functions(
sn.shortname_from_fullname(op.classname),
default=(),
)
for old_func in old_funcs:
deps.add(("delete", str(old_func.get_name(old_schema))))
if tag == "alter":
# Alteration must happen after creation, if any.
deps.add(("create", this_name_str))
deps.add(("rename", this_name_str))
deps.add(("rebase", this_name_str))
if isinstance(obj, referencing.ReferencedObject):
referrer = obj.get_referrer(new_schema)
if referrer is not None:
assert isinstance(referrer, so.QualifiedObject)
referrer_name = referrer.get_name(new_schema)
if referrer_name in renames_r:
referrer_name = renames_r[referrer_name]
ref_name_str = str(referrer_name)
deps.add(("create", ref_name_str))
deps.add(("rebase", ref_name_str))
if isinstance(obj, referencing.ReferencedInheritingObject):
implicit_ancestors = [
b.get_name(new_schema)
for b in obj.get_implicit_ancestors(new_schema)
]
if not isinstance(op, sd.CreateObject):
assert old_schema is not None
name = renames_r.get(op.classname, op.classname)
old_obj = get_object(old_schema, op, name)
assert isinstance(
old_obj,
referencing.ReferencedInheritingObject,
)
implicit_ancestors += [
b.get_name(old_schema)
for b in old_obj.get_implicit_ancestors(old_schema)
]
graph_key = this_name_str
else:
raise AssertionError(f"unexpected op type: {op!r}")
item = get_deps((tag, graph_key))
item.item = tuple(opbranch)
item.deps |= deps
item.extra = DepGraphEntryExtra(
implicit_ancestors=[renames_r.get(a, a) for a in implicit_ancestors],
)
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def get_object(
schema: s_schema.Schema,
op: sd.ObjectCommand[so.Object],
name: Optional[sn.Name] = None,
) -> so.Object:
metaclass = op.get_schema_metaclass()
if name is None:
name = op.classname
if issubclass(metaclass, s_types.Collection):
if isinstance(name, sn.QualName):
return schema.get(name)
else:
return schema.get_global(metaclass, name)
elif not issubclass(metaclass, so.QualifiedObject):
obj = schema.get_global(metaclass, name)
assert isinstance(obj, so.Object)
return obj
else:
return schema.get(name)
|
def get_object(
schema: s_schema.Schema,
op: sd.ObjectCommand[so.Object],
name: Optional[sn.Name] = None,
) -> so.Object:
metaclass = op.get_schema_metaclass()
if name is None:
name = op.classname
if issubclass(metaclass, s_types.Collection):
if isinstance(name, sn.QualName):
return schema.get(name)
else:
t_id = s_types.type_id_from_name(name)
assert t_id is not None
return schema.get_by_id(t_id)
elif not issubclass(metaclass, so.QualifiedObject):
obj = schema.get_global(metaclass, name)
assert isinstance(obj, so.Object)
return obj
else:
return schema.get(name)
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def canonicalize_attributes(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().canonicalize_attributes(schema, context)
target_ref = self.get_local_attribute_value("target")
inf_target_ref: Optional[s_types.TypeShell]
# When cardinality/required is altered, we need to force a
# reconsideration of expr if it exists in order to check
# it against the new specifier or compute them on a
# RESET. This is kind of unfortunate.
if (
isinstance(self, sd.AlterObject)
and (
self.has_attribute_value("cardinality")
or self.has_attribute_value("required")
)
and not self.has_attribute_value("expr")
and (expr := self.scls.get_expr(schema)) is not None
):
self.set_attribute_value("expr", s_expr.Expression.not_compiled(expr))
if isinstance(target_ref, ComputableRef):
schema, inf_target_ref, base = self._parse_computable(
target_ref.expr, schema, context
)
elif (expr := self.get_local_attribute_value("expr")) is not None:
schema, inf_target_ref, base = self._parse_computable(
expr.qlast, schema, context
)
else:
inf_target_ref = None
base = None
if base is not None:
self.set_attribute_value(
"bases",
so.ObjectList.create(schema, [base]),
)
self.set_attribute_value("is_derived", True)
if context.declarative:
self.set_attribute_value("declared_overloaded", True)
if inf_target_ref is not None:
srcctx = self.get_attribute_source_context("target")
self.set_attribute_value(
"target",
inf_target_ref,
source_context=srcctx,
computed=True,
)
schema = s_types.materialize_type_in_attribute(schema, context, self, "target")
expr = self.get_local_attribute_value("expr")
if expr is not None:
# There is an expression, therefore it is a computable.
self.set_attribute_value("computable", True)
return schema
|
def canonicalize_attributes(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().canonicalize_attributes(schema, context)
target_ref = self.get_local_attribute_value("target")
inf_target_ref: Optional[s_types.TypeShell]
# When cardinality/required is altered, we need to force a
# reconsideration of expr if it exists in order to check
# it against the new specifier or compute them on a
# RESET. This is kind of unfortunate.
if (
isinstance(self, sd.AlterObject)
and (
self.has_attribute_value("cardinality")
or self.has_attribute_value("required")
)
and not self.has_attribute_value("expr")
and (expr := self.scls.get_expr(schema)) is not None
):
self.set_attribute_value("expr", s_expr.Expression.not_compiled(expr))
if isinstance(target_ref, ComputableRef):
schema, inf_target_ref, base = self._parse_computable(
target_ref.expr, schema, context
)
elif (expr := self.get_local_attribute_value("expr")) is not None:
schema, inf_target_ref, base = self._parse_computable(
expr.qlast, schema, context
)
else:
inf_target_ref = None
base = None
if base is not None:
self.set_attribute_value(
"bases",
so.ObjectList.create(schema, [base]),
)
self.set_attribute_value("is_derived", True)
if context.declarative:
self.set_attribute_value("declared_overloaded", True)
if inf_target_ref is not None:
srcctx = self.get_attribute_source_context("target")
self.set_attribute_value(
"target",
inf_target_ref,
source_context=srcctx,
)
schema = s_types.materialize_type_in_attribute(schema, context, self, "target")
expr = self.get_local_attribute_value("expr")
if expr is not None:
# There is an expression, therefore it is a computable.
self.set_attribute_value("computable", True)
return schema
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _process_create_or_alter_ast(
self,
schema: s_schema.Schema,
astnode: qlast.CreateConcretePointer,
context: sd.CommandContext,
) -> None:
"""Handle the CREATE {PROPERTY|LINK} AST node.
This may be called in the context of either Create or Alter.
"""
if astnode.is_required is not None:
self.set_attribute_value(
"required",
astnode.is_required,
source_context=astnode.context,
)
if astnode.cardinality is not None:
if isinstance(self, sd.CreateObject):
self.set_attribute_value(
"cardinality",
astnode.cardinality,
source_context=astnode.context,
)
else:
handler = sd.get_special_field_alter_handler_for_context(
"cardinality", context
)
assert handler is not None
set_field = qlast.SetField(
name="cardinality",
value=qlast.StringConstant.from_python(
str(astnode.cardinality),
),
special_syntax=True,
context=astnode.context,
)
apc = handler._cmd_tree_from_ast(schema, set_field, context)
self.add(apc)
parent_ctx = self.get_referrer_context_or_die(context)
source_name = context.get_referrer_name(parent_ctx)
self.set_attribute_value("source", so.ObjectShell(name=source_name))
# FIXME: this is an approximate solution
targets = qlast.get_targets(astnode.target)
target_ref: Union[None, s_types.TypeShell, ComputableRef]
if len(targets) > 1:
assert isinstance(source_name, sn.QualName)
new_targets = [
utils.ast_to_type_shell(
t,
modaliases=context.modaliases,
schema=schema,
)
for t in targets
]
target_ref = s_types.UnionTypeShell(
components=new_targets,
module=source_name.module,
)
elif targets:
target_expr = targets[0]
if isinstance(target_expr, qlast.TypeName):
target_ref = utils.ast_to_type_shell(
target_expr,
modaliases=context.modaliases,
schema=schema,
)
else:
# computable
qlcompiler.normalize(
target_expr, schema=schema, modaliases=context.modaliases
)
target_ref = ComputableRef(target_expr)
else:
# Target is inherited.
target_ref = None
if isinstance(self, sd.CreateObject):
assert astnode.target is not None
self.set_attribute_value(
"target",
target_ref,
source_context=astnode.target.context,
)
elif target_ref is not None:
assert astnode.target is not None
self.set_attribute_value(
"target",
target_ref,
source_context=astnode.target.context,
)
|
def _process_create_or_alter_ast(
self,
schema: s_schema.Schema,
astnode: qlast.CreateConcretePointer,
context: sd.CommandContext,
) -> None:
"""Handle the CREATE {PROPERTY|LINK} AST node.
This may be called in the context of either Create or Alter.
"""
if astnode.is_required is not None:
self.set_attribute_value(
"required",
astnode.is_required,
source_context=astnode.context,
)
if astnode.cardinality is not None:
if isinstance(self, sd.CreateObject):
self.set_attribute_value(
"cardinality",
astnode.cardinality,
source_context=astnode.context,
)
else:
handler = sd.get_special_field_alter_handler_for_context(
"cardinality", context
)
assert handler is not None
set_field = qlast.SetField(
name="cardinality",
value=qlast.StringConstant.from_python(
str(astnode.cardinality),
),
special_syntax=True,
context=astnode.context,
)
apc = handler._cmd_tree_from_ast(schema, set_field, context)
self.add(apc)
parent_ctx = self.get_referrer_context_or_die(context)
source_name = context.get_referrer_name(parent_ctx)
self.set_attribute_value("source", so.ObjectShell(name=source_name))
# FIXME: this is an approximate solution
targets = qlast.get_targets(astnode.target)
target_ref: Union[None, s_types.TypeShell, ComputableRef]
if len(targets) > 1:
assert isinstance(source_name, sn.QualName)
new_targets = [
utils.ast_to_type_shell(
t,
modaliases=context.modaliases,
schema=schema,
)
for t in targets
]
target_ref = s_types.UnionTypeShell(
new_targets,
module=source_name.module,
)
elif targets:
target_expr = targets[0]
if isinstance(target_expr, qlast.TypeName):
target_ref = utils.ast_to_type_shell(
target_expr,
modaliases=context.modaliases,
schema=schema,
)
else:
# computable
qlcompiler.normalize(
target_expr, schema=schema, modaliases=context.modaliases
)
target_ref = ComputableRef(target_expr)
else:
# Target is inherited.
target_ref = None
if isinstance(self, sd.CreateObject):
assert astnode.target is not None
self.set_attribute_value(
"target",
target_ref,
source_context=astnode.target.context,
)
elif target_ref is not None:
assert astnode.target is not None
self.set_attribute_value(
"target",
target_ref,
source_context=astnode.target.context,
)
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
orig_rec = context.current().enable_recursion
context.current().enable_recursion = False
schema = super()._alter_begin(schema, context)
context.current().enable_recursion = orig_rec
scls = self.scls
vn = scls.get_verbosename(schema, with_parent=True)
orig_target = scls.get_target(orig_schema)
new_target = scls.get_target(schema)
if new_target is None:
# This will happen if `RESET TYPE` was called
# on a non-inherited type.
raise errors.SchemaError(
f"cannot RESET TYPE of {vn} because it is not inherited",
context=self.source_context,
)
if orig_target == new_target:
return schema
if not context.canonical:
assert orig_target is not None
assert new_target is not None
ptr_op = self.get_parent_op(context)
src_op = self.get_referrer_context_or_die(context).op
if self._needs_cast_expr(
schema=schema,
ptr_op=ptr_op,
src_op=src_op,
old_type=orig_target,
new_type=new_target,
):
vn = scls.get_verbosename(schema, with_parent=True)
ot = orig_target.get_verbosename(schema)
nt = new_target.get_verbosename(schema)
raise errors.SchemaError(
f"{vn} cannot be cast automatically from {ot} to {nt}",
hint=(
"You might need to specify a conversion "
"expression in a USING clause"
),
context=self.source_context,
)
if self.cast_expr is not None:
vn = scls.get_verbosename(schema, with_parent=True)
self.cast_expr = self._compile_expr(
schema=orig_schema,
context=context,
expr=self.cast_expr,
target_as_singleton=True,
singleton_result_expected=True,
expr_description=(f"the USING clause for the alteration of {vn}"),
)
using_type = self.cast_expr.stype
if not using_type.assignment_castable_to(
new_target,
self.cast_expr.schema,
):
ot = using_type.get_verbosename(self.cast_expr.schema)
nt = new_target.get_verbosename(schema)
raise errors.SchemaError(
f"result of USING clause for the alteration of "
f"{vn} cannot be cast automatically from "
f"{ot} to {nt} ",
hint="You might need to add an explicit cast.",
context=self.source_context,
)
schema = self._propagate_if_expr_refs(
schema,
context,
action=self.get_friendly_description(schema=schema),
)
if orig_target is not None:
if isinstance(orig_target, s_types.Collection):
parent_op = self.get_parent_op(context)
cleanup_op = orig_target.as_colltype_delete_delta(
schema, expiring_refs={scls}
)
parent_op.add(cleanup_op)
schema = cleanup_op.apply(schema, context)
elif orig_target.is_compound_type(schema):
parent_op = self.get_parent_op(context)
cleanup_op = orig_target.init_delta_command(
schema,
sd.DeleteObject,
if_unused=True,
expiring_refs={scls},
)
parent_op.add(cleanup_op)
schema = cleanup_op.apply(schema, context)
if context.enable_recursion:
schema = self._propagate_ref_field_alter_in_inheritance(
schema,
context,
field_name="target",
)
return schema
|
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
orig_rec = context.current().enable_recursion
context.current().enable_recursion = False
schema = super()._alter_begin(schema, context)
context.current().enable_recursion = orig_rec
scls = self.scls
vn = scls.get_verbosename(schema, with_parent=True)
orig_target = scls.get_target(orig_schema)
new_target = scls.get_target(schema)
if new_target is None:
# This will happen if `RESET TYPE` was called
# on a non-inherited type.
raise errors.SchemaError(
f"cannot RESET TYPE of {vn} because it is not inherited",
context=self.source_context,
)
if orig_target == new_target:
return schema
if not context.canonical:
assert orig_target is not None
assert new_target is not None
ptr_op = self.get_parent_op(context)
src_op = self.get_referrer_context_or_die(context).op
if self._needs_cast_expr(
schema=schema,
ptr_op=ptr_op,
src_op=src_op,
old_type=orig_target,
new_type=new_target,
):
vn = scls.get_verbosename(schema, with_parent=True)
ot = orig_target.get_verbosename(schema)
nt = new_target.get_verbosename(schema)
raise errors.SchemaError(
f"{vn} cannot be cast automatically from {ot} to {nt}",
hint=(
"You might need to specify a conversion "
"expression in a USING clause"
),
context=self.source_context,
)
if self.cast_expr is not None:
vn = scls.get_verbosename(schema, with_parent=True)
self.cast_expr = self._compile_expr(
schema=orig_schema,
context=context,
expr=self.cast_expr,
target_as_singleton=True,
singleton_result_expected=True,
expr_description=(f"the USING clause for the alteration of {vn}"),
)
using_type = self.cast_expr.stype
if not using_type.assignment_castable_to(
new_target,
self.cast_expr.schema,
):
ot = using_type.get_verbosename(self.cast_expr.schema)
nt = new_target.get_verbosename(schema)
raise errors.SchemaError(
f"result of USING clause for the alteration of "
f"{vn} cannot be cast automatically from "
f"{ot} to {nt} ",
hint="You might need to add an explicit cast.",
context=self.source_context,
)
schema = self._propagate_if_expr_refs(
schema,
context,
action=self.get_friendly_description(schema=schema),
)
if orig_target is not None and isinstance(orig_target, s_types.Collection):
parent_ctx = context.parent()
assert parent_ctx
parent_ctx.op.add(
orig_target.as_colltype_delete_delta(schema, expiring_refs={scls})
)
if context.enable_recursion:
schema = self._propagate_ref_field_alter_in_inheritance(
schema,
context,
field_name="target",
)
return schema
|
https://github.com/edgedb/edgedb/issues/2002
|
ERROR: InternalServerError: 'UnionType' object has no attribute 'get_name'
Hint: This is most likely a bug in EdgeDB. Please consider opening an issue ticket at https://github.com/edgedb/edgedb/issues/new?template=bug_report.md
Server traceback:
Traceback (most recent call last):
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/procpool/worker.py", line 75, in worker
res = await meth(*args)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1808, in compile_eql_tokens
return self._compile(ctx=ctx, tokens=eql_tokens)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1442, in _compile
comp: dbstate.BaseQuery = self._compile_dispatch_ql(ctx, stmt)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 1365, in _compile_dispatch_ql
return self._compile_ql_migration(ctx, ql)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/server/compiler/compiler.py", line 794, in _compile_ql_migration
target_schema = s_ddl.apply_sdl(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/schema/ddl.py", line 371, in apply_sdl
ddl_stmts = s_decl.sdl_to_ddl(current_schema, documents)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 230, in sdl_to_ddl
trace_dependencies(decl_ast, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 499, in trace_default
_register_item(node, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 623, in _register_item
trace_dependencies(cmd, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 443, in trace_ConcretePointer
_register_item(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/declarative.py", line 638, in _register_item
tdeps = qltracer.trace_refs(
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 162, in trace_refs
trace(qltree, ctx=ctx)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/functools.py", line 875, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/lib/x86_64-linux-gnu/edgedb-server-1-alpha6/lib/python3.8/site-packages/edb/edgeql/tracer.py", line 653, in trace_Select
ctx.path_prefix = tip.get_name(ctx.schema)
AttributeError: 'UnionType' object has no attribute 'get_name'
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.