# Source: tensorflow/tensorflow
def testMakeBatchedFeaturesDataset(self):
  files = 2
  records_per_file = 5

  def make_record(file_index):
    example = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                "file":
                    feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[file_index])),
            }))
    return example.SerializeToString()

  filenames = []
  for file_index in range(files):
    filename = os.path.join(self.get_temp_dir(),
                            "tf_record.%d.txt" % file_index)
    filenames.append(filename)
    writer = python_io.TFRecordWriter(filename)
    for _ in range(records_per_file):
      writer.write(make_record(file_index))
    writer.close()

  dataset = readers.make_batched_features_dataset(
      file_pattern=filenames,
      batch_size=records_per_file,
      features={
          "file": parsing_ops.FixedLenFeature([], dtypes.int64),
      },
      reader=core_readers.TFRecordDataset,
      num_epochs=1)
  # We should shard at the file level, so that all records come from file 0.
  dataset = distribute._AutoShardDataset(dataset, 2, 0)
  dataset = dataset.unbatch()
  output = self.getDatasetOutput(dataset)
  files = [elem["file"] for elem in output]
  self.assertEqual(files, [0] * records_per_file)
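# A hedged sketch (not part of this test file) of how end users normally
# request the same file-level sharding through the public tf.data options
# API, instead of the private _AutoShardDataset op driven above. The `tf`
# alias, file pattern, and FILE policy choice here are illustrative.
import tensorflow as tf

files = tf.data.Dataset.list_files("tf_record.*.txt", shuffle=False)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
    tf.data.experimental.AutoShardPolicy.FILE)
files = files.with_options(options)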
def testHintShardingValidPattern(self):
  options = options_lib.Options()
  options.experimental_distribute.auto_shard_policy = (
      options_lib.AutoShardPolicy.HINT)
  dataset = dataset_ops.Dataset.range(100).shard(distribute.SHARD_HINT, 0)
  dataset = dataset.with_options(options)
  dataset = distribute._AutoShardDataset(dataset, 10, 0)
  self.assertDatasetProduces(dataset, list(range(0, 100, 10)))
def testHintShardingInvalidPattern(self):
  options = options_lib.Options()
  options.experimental_distribute.auto_shard_policy = (
      options_lib.AutoShardPolicy.HINT)
  dataset = dataset_ops.Dataset.range(100).shard(1, 0)
  dataset = dataset.with_options(options)
  dataset = distribute._AutoShardDataset(dataset, 10, 0)
  self.assertDatasetProduces(dataset, list(range(100)))
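# A rough sketch of the user-facing HINT pattern exercised by the two tests
# above: the dataset marks a shard() call with the SHARD_HINT placeholder,
# and the auto-shard rewrite substitutes the worker count and index at run
# time. Assumes a TF release where SHARD_HINT is public.
import tensorflow as tf

options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
    tf.data.experimental.AutoShardPolicy.HINT)
dataset = tf.data.Dataset.range(100).shard(tf.data.experimental.SHARD_HINT, 0)
dataset = dataset.with_options(options)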
def testEnumerateAutoShardPolicies(self, auto_shard_policy):
  """Verifies tf.data handles every auto-shard policy with no errors."""
  dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
  dataset = dataset.flat_map(core_readers.TFRecordDataset)
  dataset = dataset.batch(5)
  options = options_lib.Options()
  options.experimental_distribute.auto_shard_policy = auto_shard_policy
  dataset = dataset.with_options(options)
  dataset = distribute._AutoShardDataset(dataset, 5, 3)
  self.getDatasetOutput(dataset, requires_initialization=True)
def _setUpFiles(self, num_files, num_records_per_file):
  self._num_files = num_files
  self._num_records = num_records_per_file
  self._filenames = self._createFiles()
def testFileShardingWithLegacyRebatch(self):
  # Tests that RebatchDatasetV1 is a passthrough op.
  self._setUpFiles(num_files=5, num_records_per_file=10)
  dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
  dataset = dataset.apply(
      testing.assert_next(["Shard", "FlatMap", "Batch", "Rebatch"]))
  dataset = dataset.flat_map(core_readers.TFRecordDataset)
  dataset = dataset.batch(5)
  dataset = distribute._LegacyRebatchDataset(dataset, num_replicas=5)
  dataset = distribute._AutoShardDataset(dataset, 5, 3)
  expected = [[self._record(3, i)] for i in range(10)]
  self.assertDatasetProduces(dataset, expected)
def testFileShardingWithRebatch(self):
  # Tests that RebatchDatasetV2 is a passthrough op.
  self._setUpFiles(num_files=3, num_records_per_file=5)
  dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
  dataset = dataset.apply(
      testing.assert_next(["Shard", "FlatMap", "Batch", "Rebatch"]))
  dataset = dataset.flat_map(core_readers.TFRecordDataset)
  dataset = dataset.batch(5)
  dataset = distribute._RebatchDataset(dataset, batch_sizes=[2, 1, 2])
  dataset = distribute._AutoShardDataset(dataset, 3, 1)
  expected = [[self._record(1, 0), self._record(1, 1)], [self._record(1, 2)],
              [self._record(1, 3), self._record(1, 4)]]
  self.assertDatasetProduces(dataset, expected)
def testUseLegacyRebatchWithDataSharding(self, sharding_policy,
                                         with_prefetch):
  # This test simulates a distributed environment with 3 workers, each with
  # 1 replica.
  dataset = dataset_ops.Dataset.range(8)
  dataset = dataset.batch(4)
  options = options_lib.Options()
  options.experimental_distribute.auto_shard_policy = sharding_policy
  dataset = dataset.with_options(options)

  # We expect the auto-shard rewrite to rewrite RebatchDatasetV2 to
  # RebatchDataset(V1) for correctness reasons. This will modify the output
  # of the dataset.
  worker_a_dataset = distribute._RebatchDataset(
      dataset, batch_sizes=[2, 1, 1])
  if with_prefetch:
    worker_a_dataset = worker_a_dataset.prefetch(1)
  worker_a_dataset = distribute._AutoShardDataset(
      worker_a_dataset, 3, 0, num_replicas=3)
  expected = [[0, 1], [4, 5]]
  self.assertDatasetProduces(worker_a_dataset, expected)

  worker_b_dataset = distribute._RebatchDataset(
      dataset, batch_sizes=[1, 1, 2])
  if with_prefetch:
    worker_b_dataset = worker_b_dataset.prefetch(1)
  worker_b_dataset = distribute._AutoShardDataset(
      worker_b_dataset, 3, 1, num_replicas=3)
  expected = [[2, 3], [6, 7]]
  self.assertDatasetProduces(worker_b_dataset, expected)

  worker_c_dataset = distribute._RebatchDataset(
      dataset, batch_sizes=[1, 2, 1])
  if with_prefetch:
    worker_c_dataset = worker_c_dataset.prefetch(1)
  worker_c_dataset = distribute._AutoShardDataset(
      worker_c_dataset, 3, 2, num_replicas=3)
  expected = [[], []]
  self.assertDatasetProduces(worker_c_dataset, expected)
def setUp(self):
  super(AutoShardDatasetCheckpointTest, self).setUp()
  self._num_files = 10
  self._num_records = 10
  self._filenames = self._createFiles()
def test(self, verify_fn):

  def build_dataset():
    dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
    dataset = dataset.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    dataset = distribute._AutoShardDataset(dataset, 5, 3)
    return dataset

  verify_fn(self, build_dataset, num_outputs=20)
# Source: Stanford-Online/edx-platform
def setUp(self):
    """Create a user and log in."""
    super(SupportViewTestCase, self).setUp()
    self.user = UserFactory(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
    self.course = CourseFactory.create()
    success = self.client.login(username=self.USERNAME, password=self.PASSWORD)
    self.assertTrue(success, msg="Could not log in")
def setUp(self):
    """Make the user support staff."""
    super(SupportViewManageUserTests, self).setUp()
    SupportStaffRole().add_users(self.user)
def test_get_form_with_user_info(self):
    """
    Test that the support view returns the Manage User form
    with the user's info.
    """
    url = reverse('support:manage_user_detail') + self.user.username
    response = self.client.get(url)
    data = json.loads(response.content)
    self.assertEqual(data['username'], self.user.username)
def test_access(self, url_name, role, has_access):
    if role is not None:
        role().add_users(self.user)
    url = reverse(url_name)
    response = self.client.get(url)
    if has_access:
        self.assertEqual(response.status_code, 200)
    else:
        self.assertEqual(response.status_code, 403)
def test_require_login(self, url_name):
    url = reverse(url_name)

    # Log out then try to retrieve the page
    self.client.logout()
    response = self.client.get(url)

    # Expect a redirect to the login page
    redirect_url = "{login_url}?next={original_url}".format(
        login_url=reverse("signin_user"),
        original_url=url,
    )
    self.assertRedirects(response, redirect_url)
def setUp(self):
    """Make the user support staff."""
    super(SupportViewIndexTests, self).setUp()
    SupportStaffRole().add_users(self.user)
def setUp(self):
    """Make the user support staff."""
    super(SupportViewCertificatesTests, self).setUp()
    SupportStaffRole().add_users(self.user)
def test_certificates_with_user_filter(self):
    # Check that an initial filter is passed to the JavaScript client.
    url = reverse("support:certificates") + "?user=student@example.com"
    response = self.client.get(url)
    self.assertContains(response, "userFilter: 'student@example.com'")
def setUp(self):
    super(SupportViewEnrollmentsTests, self).setUp()
    SupportStaffRole().add_users(self.user)

    self.course = CourseFactory(display_name=u'teꜱᴛ')
    self.student = UserFactory.create(username='student', email='test@example.com', password='test')

    for mode in (
            CourseMode.AUDIT, CourseMode.PROFESSIONAL, CourseMode.CREDIT_MODE,
            CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.VERIFIED, CourseMode.HONOR
    ):
        CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)  # pylint: disable=no-member

    self.verification_deadline = VerificationDeadline(
        course_key=self.course.id,  # pylint: disable=no-member
        deadline=datetime.now(UTC) + timedelta(days=365)
    )
    self.verification_deadline.save()

    CourseEnrollmentFactory.create(mode=CourseMode.AUDIT, user=self.student, course_id=self.course.id)  # pylint: disable=no-member

    self.url = reverse('support:enrollment_list', kwargs={'username_or_email': self.student.username})
def test_get_enrollments(self, search_string_type):
    url = reverse(
        'support:enrollment_list',
        kwargs={'username_or_email': getattr(self.student, search_string_type)}
    )
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    data = json.loads(response.content)
    self.assertEqual(len(data), 1)
    self.assertDictContainsSubset({
        'mode': CourseMode.AUDIT,
        'manual_enrollment': {},
        'user': self.student.username,
        'course_id': unicode(self.course.id),  # pylint: disable=no-member
        'is_active': True,
        'verified_upgrade_deadline': None,
    }, data[0])
    self.assertEqual(
        {CourseMode.VERIFIED, CourseMode.AUDIT, CourseMode.HONOR,
         CourseMode.NO_ID_PROFESSIONAL_MODE, CourseMode.PROFESSIONAL},
        {mode['slug'] for mode in data[0]['course_modes']}
    )
def test_change_enrollment(self, search_string_type):
    self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
    url = reverse(
        'support:enrollment_list',
        kwargs={'username_or_email': getattr(self.student, search_string_type)}
    )
    response = self.client.post(url, data={
        'course_id': unicode(self.course.id),  # pylint: disable=no-member
        'old_mode': CourseMode.AUDIT,
        'new_mode': CourseMode.VERIFIED,
        'reason': 'Financial Assistance'
    })
    self.assertEqual(response.status_code, 200)
    self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
    self.assert_enrollment(CourseMode.VERIFIED)
def test_change_enrollment_bad_data(self, data, error_message):
    # `self` isn't available from within the DDT declaration, so
    # assign the course ID here
    if 'course_id' in data and data['course_id'] is None:
        data['course_id'] = unicode(self.course.id)  # pylint: disable=no-member
    response = self.client.post(self.url, data)
    self.assertEqual(response.status_code, 400)
    self.assertIsNotNone(re.match(error_message, response.content))
    self.assert_enrollment(CourseMode.AUDIT)
    self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
def test_update_enrollment_for_all_modes(self, new_mode):
    """Verify support can change the enrollment to all available modes
    except credit."""
    self.assert_update_enrollment('username', new_mode)
def test_update_enrollment_for_ended_course(self, new_mode):
    """Verify support can change the enrollment of an archived course."""
    self.set_course_end_date_and_expiry()
    self.assert_update_enrollment('username', new_mode)
def test_get_enrollments_with_expired_mode(self, search_string_type):
    """Verify that the page can get all modes for an archived course."""
    self.set_course_end_date_and_expiry()
    url = reverse(
        'support:enrollment_list',
        kwargs={'username_or_email': getattr(self.student, search_string_type)}
    )
    response = self.client.get(url)
    self._assert_generated_modes(response)
def test_update_enrollments_with_expired_mode(self, search_string_type):
    """Verify that enrollment can be updated to verified mode."""
    self.set_course_end_date_and_expiry()
    self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
    self.assert_update_enrollment(search_string_type, CourseMode.VERIFIED)
def assert_update_enrollment(self, search_string_type, new_mode):
    """DRY helper to update the enrollment and assert the response."""
    self.assertIsNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
    url = reverse(
        'support:enrollment_list',
        kwargs={'username_or_email': getattr(self.student, search_string_type)}
    )
    response = self.client.post(url, data={
        'course_id': unicode(self.course.id),  # pylint: disable=no-member
        'old_mode': CourseMode.AUDIT,
        'new_mode': new_mode,
        'reason': 'Financial Assistance'
    })
    # Enrollment cannot be changed to credit mode.
    if new_mode == CourseMode.CREDIT_MODE:
        self.assertEqual(response.status_code, 400)
    else:
        self.assertEqual(response.status_code, 200)
        self.assertIsNotNone(ManualEnrollmentAudit.get_manual_enrollment_by_email(self.student.email))
        self.assert_enrollment(new_mode)
# Source: Vvucinic/Wander
def __init__(self, values, index, level=-1, value_columns=None):
    self.is_categorical = None
    if values.ndim == 1:
        if isinstance(values, Categorical):
            self.is_categorical = values
            values = np.array(values)
        values = values[:, np.newaxis]
    self.values = values
    self.value_columns = value_columns

    if value_columns is None and values.shape[1] != 1:  # pragma: no cover
        raise ValueError('must pass column labels for multi-column data')

    self.index = index

    if isinstance(self.index, MultiIndex):
        if index._reference_duplicate_name(level):
            msg = ("Ambiguous reference to {0}. The index "
                   "names are not unique.".format(level))
            raise ValueError(msg)

    self.level = self.index._get_level_number(level)

    # when index includes `nan`, need to lift levels/strides by 1
    self.lift = 1 if -1 in self.index.labels[self.level] else 0

    self.new_index_levels = list(index.levels)
    self.new_index_names = list(index.names)

    self.removed_name = self.new_index_names.pop(self.level)
    self.removed_level = self.new_index_levels.pop(self.level)

    self._make_sorted_values_labels()
    self._make_selectors()
def _make_selectors(self):
    new_levels = self.new_index_levels

    # make the mask
    remaining_labels = self.sorted_labels[:-1]
    level_sizes = [len(x) for x in new_levels]

    comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
    ngroups = len(obs_ids)

    comp_index = _ensure_platform_int(comp_index)
    stride = self.index.levshape[self.level] + self.lift
    self.full_shape = ngroups, stride

    selector = self.sorted_labels[-1] + stride * comp_index + self.lift
    mask = np.zeros(np.prod(self.full_shape), dtype=bool)
    mask.put(selector, True)

    if mask.sum() < len(self.index):
        raise ValueError('Index contains duplicate entries, '
                         'cannot reshape')

    self.group_index = comp_index
    self.mask = mask
    self.unique_groups = obs_ids
    self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_new_values(self):
    values = self.values

    # place the values
    length, width = self.full_shape
    stride = values.shape[1]
    result_width = width * stride
    result_shape = (length, result_width)

    # if our mask is all True, then we can use our existing dtype
    if self.mask.all():
        dtype = values.dtype
        new_values = np.empty(result_shape, dtype=dtype)
    else:
        dtype, fill_value = _maybe_promote(values.dtype)
        new_values = np.empty(result_shape, dtype=dtype)
        new_values.fill(fill_value)

    new_mask = np.zeros(result_shape, dtype=bool)

    # is there a simpler / faster way of doing this?
    for i in range(values.shape[1]):
        chunk = new_values[:, i * width: (i + 1) * width]
        mask_chunk = new_mask[:, i * width: (i + 1) * width]

        chunk.flat[self.mask] = self.sorted_values[:, i]
        mask_chunk.flat[self.mask] = True

    return new_values, new_mask
def get_new_index(self):
    result_labels = [lab.take(self.compressor)
                     for lab in self.sorted_labels[:-1]]

    # construct the new index
    if len(self.new_index_levels) == 1:
        lev, lab = self.new_index_levels[0], result_labels[0]
        if (lab == -1).any():
            lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
        return lev.take(lab)

    return MultiIndex(levels=self.new_index_levels,
                      labels=result_labels,
                      names=self.new_index_names,
                      verify_integrity=False)
def pivot(self, index=None, columns=None, values=None):
    """
    See DataFrame.pivot
    """
    if values is None:
        cols = [columns] if index is None else [index, columns]
        append = index is None
        indexed = self.set_index(cols, append=append)
        return indexed.unstack(columns)
    else:
        if index is None:
            index = self.index
        else:
            index = self[index]
        indexed = Series(self[values].values,
                         index=MultiIndex.from_arrays([index,
                                                       self[columns]]))
        return indexed.unstack(columns)
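# A small, self-contained illustration of the pivot() method above; the
# frame contents are invented for the example.
import pandas as pd

df = pd.DataFrame({'date': ['d1', 'd1', 'd2', 'd2'],
                   'var': ['a', 'b', 'a', 'b'],
                   'val': [1, 2, 3, 4]})
wide = df.pivot(index='date', columns='var', values='val')
# One row per date, one column per var:
assert wide.loc['d1', 'a'] == 1 and wide.loc['d2', 'b'] == 4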
def _slow_pivot(index, columns, values):
    """
    Produce 'pivot' table based on 3 columns of this DataFrame.
    Uses unique values from index / columns and fills with values.

    Parameters
    ----------
    index : string or object
        Column name to use to make new frame's index
    columns : string or object
        Column name to use to make new frame's columns
    values : string or object
        Column name to use for populating new frame's values

    Could benefit from some Cython here.
    """
    tree = {}
    for i, (idx, col) in enumerate(zip(index, columns)):
        if col not in tree:
            tree[col] = {}
        branch = tree[col]
        branch[idx] = values[i]
    return DataFrame(tree)
def _unstack_frame(obj, level):
    from pandas.core.internals import BlockManager, make_block

    if obj._is_mixed_type:
        unstacker = _Unstacker(np.empty(obj.shape, dtype=bool),  # dummy
                               obj.index, level=level,
                               value_columns=obj.columns)
        new_columns = unstacker.get_new_columns()
        new_index = unstacker.get_new_index()
        new_axes = [new_columns, new_index]

        new_blocks = []
        mask_blocks = []
        for blk in obj._data.blocks:
            blk_items = obj._data.items[blk.mgr_locs.indexer]
            bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
                                    value_columns=blk_items)
            new_items = bunstacker.get_new_columns()
            new_placement = new_columns.get_indexer(new_items)
            new_values, mask = bunstacker.get_new_values()

            mblk = make_block(mask.T, placement=new_placement)
            mask_blocks.append(mblk)

            newb = make_block(new_values.T, placement=new_placement)
            new_blocks.append(newb)

        result = DataFrame(BlockManager(new_blocks, new_axes))
        mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
        return result.ix[:, mask_frame.sum(0) > 0]
    else:
        unstacker = _Unstacker(obj.values, obj.index, level=level,
                               value_columns=obj.columns)
        return unstacker.get_result()
def stack(frame, level=-1, dropna=True):
    """
    Convert DataFrame to Series with multi-level Index. Columns become the
    second level of the resulting hierarchical index

    Returns
    -------
    stacked : Series
    """

    def factorize(index):
        if index.is_unique:
            return index, np.arange(len(index))
        cat = Categorical(index, ordered=True)
        return cat.categories, cat.codes

    N, K = frame.shape
    if isinstance(frame.columns, MultiIndex):
        if frame.columns._reference_duplicate_name(level):
            msg = ("Ambiguous reference to {0}. The column "
                   "names are not unique.".format(level))
            raise ValueError(msg)

    # Will also convert negative level numbers and check if out of bounds.
    level_num = frame.columns._get_level_number(level)

    if isinstance(frame.columns, MultiIndex):
        return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
    elif isinstance(frame.index, MultiIndex):
        new_levels = list(frame.index.levels)
        new_labels = [lab.repeat(K) for lab in frame.index.labels]

        clev, clab = factorize(frame.columns)
        new_levels.append(clev)
        new_labels.append(np.tile(clab, N).ravel())

        new_names = list(frame.index.names)
        new_names.append(frame.columns.name)
        new_index = MultiIndex(levels=new_levels, labels=new_labels,
                               names=new_names, verify_integrity=False)
    else:
        levels, (ilab, clab) = \
            zip(*map(factorize, (frame.index, frame.columns)))
        labels = ilab.repeat(K), np.tile(clab, N).ravel()
        new_index = MultiIndex(levels=levels,
                               labels=labels,
                               names=[frame.index.name, frame.columns.name],
                               verify_integrity=False)

    new_values = frame.values.ravel()
    if dropna:
        mask = notnull(new_values)
        new_values = new_values[mask]
        new_index = new_index[mask]
    return Series(new_values, index=new_index)
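# A small illustration of stack(): the columns become the innermost level of
# the resulting Series index. The frame here is invented for the example.
import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], index=['r1', 'r2'], columns=['c1', 'c2'])
stacked = df.stack()
assert stacked[('r1', 'c2')] == 2 and stacked[('r2', 'c1')] == 3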
def _stack_multi_columns(frame, level_num=-1, dropna=True):
    def _convert_level_number(level_num, columns):
        """
        Logic for converting the level number to something we can safely
        pass to swaplevel: we generally want to convert the level number
        into a level name, except when columns do not have names, in which
        case we must leave it as a level number.
        """
        if level_num in columns.names:
            return columns.names[level_num]
        else:
            if columns.names[level_num] is None:
                return level_num
            else:
                return columns.names[level_num]

    this = frame.copy()

    # this makes life much simpler
    if level_num != frame.columns.nlevels - 1:
        # roll levels to put selected level at end
        roll_columns = this.columns
        for i in range(level_num, frame.columns.nlevels - 1):
            # Need to check if the ints conflict with level names
            lev1 = _convert_level_number(i, roll_columns)
            lev2 = _convert_level_number(i + 1, roll_columns)
            roll_columns = roll_columns.swaplevel(lev1, lev2)
        this.columns = roll_columns

    if not this.columns.is_lexsorted():
        # Workaround the edge case where 0 is one of the column names,
        # which interferes with trying to sort based on the first
        # level
        level_to_sort = _convert_level_number(0, this.columns)
        this = this.sortlevel(level_to_sort, axis=1)

    # tuple list excluding level for grouping columns
    if len(frame.columns.levels) > 2:
        tuples = list(zip(*[
            lev.take(lab) for lev, lab in
            zip(this.columns.levels[:-1], this.columns.labels[:-1])
        ]))
        unique_groups = [key for key, _ in itertools.groupby(tuples)]
        new_names = this.columns.names[:-1]
        new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
    else:
        new_columns = unique_groups = this.columns.levels[0]

    # time to ravel the values
    new_data = {}
    level_vals = this.columns.levels[-1]
    level_labels = sorted(set(this.columns.labels[-1]))
    level_vals_used = level_vals[level_labels]
    levsize = len(level_labels)
    drop_cols = []
    for key in unique_groups:
        loc = this.columns.get_loc(key)
        slice_len = loc.stop - loc.start
        # can make more efficient?

        if slice_len == 0:
            drop_cols.append(key)
            continue
        elif slice_len != levsize:
            chunk = this.ix[:, this.columns[loc]]
            chunk.columns = level_vals.take(chunk.columns.labels[-1])
            value_slice = chunk.reindex(columns=level_vals_used).values
        else:
            if frame._is_mixed_type:
                value_slice = this.ix[:, this.columns[loc]].values
            else:
                value_slice = this.values[:, loc]

        new_data[key] = value_slice.ravel()

    if len(drop_cols) > 0:
        new_columns = new_columns.difference(drop_cols)

    N = len(this)

    if isinstance(this.index, MultiIndex):
        new_levels = list(this.index.levels)
        new_names = list(this.index.names)
        new_labels = [lab.repeat(levsize) for lab in this.index.labels]
    else:
        new_levels = [this.index]
        new_labels = [np.arange(N).repeat(levsize)]
        new_names = [this.index.name]  # something better?

    new_levels.append(frame.columns.levels[level_num])
    new_labels.append(np.tile(level_labels, N))
    new_names.append(frame.columns.names[level_num])

    new_index = MultiIndex(levels=new_levels, labels=new_labels,
                           names=new_names, verify_integrity=False)

    result = DataFrame(new_data, index=new_index, columns=new_columns)

    # more efficient way to go about this? can do the whole masking biz but
    # will only save a small amount of time...
    if dropna:
        result = result.dropna(axis=0, how='all')

    return result
def lreshape(data, groups, dropna=True, label=None):
    """
    Reshape long-format data to wide. Generalized inverse of DataFrame.pivot

    Parameters
    ----------
    data : DataFrame
    groups : dict
        {new_name : list_of_columns}
    dropna : boolean, default True

    Examples
    --------
    >>> import pandas as pd
    >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
    ...                      'team': ['Red Sox', 'Yankees'],
    ...                      'year1': [2007, 2008], 'year2': [2008, 2008]})
    >>> data
       hr1  hr2     team  year1  year2
    0  514  545  Red Sox   2007   2008
    1  573  526  Yankees   2007   2008
    >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
          team   hr  year
    0  Red Sox  514  2007
    1  Yankees  573  2007
    2  Red Sox  545  2008
    3  Yankees  526  2008

    Returns
    -------
    reshaped : DataFrame
    """
    if isinstance(groups, dict):
        keys = list(groups.keys())
        values = list(groups.values())
    else:
        keys, values = zip(*groups)

    all_cols = list(set.union(*[set(x) for x in values]))
    id_cols = list(data.columns.difference(all_cols))

    K = len(values[0])

    for seq in values:
        if len(seq) != K:
            raise ValueError('All column lists must be same length')

    mdata = {}
    pivot_cols = []

    for target, names in zip(keys, values):
        mdata[target] = com._concat_compat([data[col].values for col in names])
        pivot_cols.append(target)

    for col in id_cols:
        mdata[col] = np.tile(data[col].values, K)

    if dropna:
        mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
        for c in pivot_cols:
            mask &= notnull(mdata[c])
        if not mask.all():
            mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))

    return DataFrame(mdata, columns=id_cols + pivot_cols)
def get_var_names(df, regex):
    return df.filter(regex=regex).columns.tolist()
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
                columns=None, sparse=False):
    """
    Convert categorical variable into dummy/indicator variables

    Parameters
    ----------
    data : array-like, Series, or DataFrame
    prefix : string, list of strings, or dict of strings, default None
        String to append DataFrame column names.
        Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
        can be a dictionary mapping column names to prefixes.
    prefix_sep : string, default '_'
        If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix`.
    dummy_na : bool, default False
        Add a column to indicate NaNs, if False NaNs are ignored.
    columns : list-like, default None
        Column names in the DataFrame to be encoded.
        If `columns` is None then all the columns with
        `object` or `category` dtype will be converted.
    sparse : bool, default False
        Whether the dummy columns should be sparse or not. Returns
        SparseDataFrame if `data` is a Series or if all columns are included.
        Otherwise returns a DataFrame with some SparseBlocks.

        .. versionadded:: 0.16.1

    Returns
    -------
    dummies : DataFrame or SparseDataFrame

    Examples
    --------
    >>> import pandas as pd
    >>> s = pd.Series(list('abca'))

    >>> get_dummies(s)
       a  b  c
    0  1  0  0
    1  0  1  0
    2  0  0  1
    3  1  0  0

    >>> s1 = ['a', 'b', np.nan]

    >>> get_dummies(s1)
       a  b
    0  1  0
    1  0  1
    2  0  0

    >>> get_dummies(s1, dummy_na=True)
       a  b  NaN
    0  1  0    0
    1  0  1    0
    2  0  0    1

    >>> df = DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
    ...                 'C': [1, 2, 3]})

    >>> get_dummies(df, prefix=['col1', 'col2'])
       C  col1_a  col1_b  col2_a  col2_b  col2_c
    0  1       1       0       0       1       0
    1  2       0       1       1       0       0
    2  3       1       0       0       0       1

    See also ``Series.str.get_dummies``.
    """
    from pandas.tools.merge import concat
    from itertools import cycle

    if isinstance(data, DataFrame):
        # determine columns being encoded
        if columns is None:
            columns_to_encode = data.select_dtypes(include=['object',
                                                            'category']).columns
        else:
            columns_to_encode = columns

        # validate prefixes and separator to avoid silently dropping cols
        def check_len(item, name):
            length_msg = ("Length of '{0}' ({1}) did "
                          "not match the length of the columns "
                          "being encoded ({2}).")
            if com.is_list_like(item):
                if not len(item) == len(columns_to_encode):
                    raise ValueError(length_msg.format(name, len(item),
                                                       len(columns_to_encode)))

        check_len(prefix, 'prefix')
        check_len(prefix_sep, 'prefix_sep')

        if isinstance(prefix, compat.string_types):
            prefix = cycle([prefix])
        if isinstance(prefix, dict):
            prefix = [prefix[col] for col in columns_to_encode]

        if prefix is None:
            prefix = columns_to_encode

        # validate separators
        if isinstance(prefix_sep, compat.string_types):
            prefix_sep = cycle([prefix_sep])
        elif isinstance(prefix_sep, dict):
            prefix_sep = [prefix_sep[col] for col in columns_to_encode]

        if set(columns_to_encode) == set(data.columns):
            with_dummies = []
        else:
            with_dummies = [data.drop(columns_to_encode, axis=1)]

        for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
            dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
                                    dummy_na=dummy_na, sparse=sparse)
            with_dummies.append(dummy)

        result = concat(with_dummies, axis=1)
    else:
        result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
                                 sparse=sparse)
    return result
# Source: Microvellum/Fluid-Designer
def pack(width, data):
    return b''.join(v.to_bytes(width, sys.byteorder, signed=True) for v in data)
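# pack() serializes signed integers at the given width in the host byte
# order (sys.byteorder), so expected byte strings are endian-dependent; for
# example, on a little-endian machine:
import sys

if sys.byteorder == 'little':
    assert pack(2, [1, -1]) == b'\x01\x00\xff\xff'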
def test_max(self):
    for w in 1, 2, 3, 4:
        self.assertEqual(audioop.max(b'', w), 0)
        self.assertEqual(audioop.max(bytearray(), w), 0)
        self.assertEqual(audioop.max(memoryview(b''), w), 0)
        p = packs[w]
        self.assertEqual(audioop.max(p(5), w), 5)
        self.assertEqual(audioop.max(p(5, -8, -1), w), 8)
        self.assertEqual(audioop.max(p(maxvalues[w]), w), maxvalues[w])
        self.assertEqual(audioop.max(p(minvalues[w]), w), -minvalues[w])
        self.assertEqual(audioop.max(datas[w], w), -minvalues[w])
def test_maxpp(self):
    for w in 1, 2, 3, 4:
        self.assertEqual(audioop.maxpp(b'', w), 0)
        self.assertEqual(audioop.maxpp(bytearray(), w), 0)
        self.assertEqual(audioop.maxpp(memoryview(b''), w), 0)
        self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
        self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
        self.assertEqual(audioop.maxpp(datas[w], w),
                         maxvalues[w] - minvalues[w])
def test_avgpp(self):
    for w in 1, 2, 3, 4:
        self.assertEqual(audioop.avgpp(b'', w), 0)
        self.assertEqual(audioop.avgpp(bytearray(), w), 0)
        self.assertEqual(audioop.avgpp(memoryview(b''), w), 0)
        self.assertEqual(audioop.avgpp(packs[w](*range(100)), w), 0)
        self.assertEqual(audioop.avgpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
    self.assertEqual(audioop.avgpp(datas[1], 1), 196)
    self.assertEqual(audioop.avgpp(datas[2], 2), 50534)
    self.assertEqual(audioop.avgpp(datas[3], 3), 12937096)
    self.assertEqual(audioop.avgpp(datas[4], 4), 3311897002)
def test_cross(self):
    for w in 1, 2, 3, 4:
        self.assertEqual(audioop.cross(b'', w), -1)
        self.assertEqual(audioop.cross(bytearray(), w), -1)
        self.assertEqual(audioop.cross(memoryview(b''), w), -1)
        p = packs[w]
        self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
        self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
        self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
        self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
        self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
def test_bias(self):
    for w in 1, 2, 3, 4:
        for bias in 0, 1, -1, 127, -128, 0x7fffffff, -0x80000000:
            self.assertEqual(audioop.bias(b'', w, bias), b'')
            self.assertEqual(audioop.bias(bytearray(), w, bias), b'')
            self.assertEqual(audioop.bias(memoryview(b''), w, bias), b'')
    self.assertEqual(audioop.bias(datas[1], 1, 1),
                     b'\x01\x13\x46\xbc\x80\x81\x00')
    self.assertEqual(audioop.bias(datas[1], 1, -1),
                     b'\xff\x11\x44\xba\x7e\x7f\xfe')
    self.assertEqual(audioop.bias(datas[1], 1, 0x7fffffff),
                     b'\xff\x11\x44\xba\x7e\x7f\xfe')
    self.assertEqual(audioop.bias(datas[1], 1, -0x80000000),
                     datas[1])
    self.assertEqual(audioop.bias(datas[2], 2, 1),
                     packs[2](1, 0x1235, 0x4568, -0x4566, -0x8000, -0x7fff, 0))
    self.assertEqual(audioop.bias(datas[2], 2, -1),
                     packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
    self.assertEqual(audioop.bias(datas[2], 2, 0x7fffffff),
                     packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
    self.assertEqual(audioop.bias(datas[2], 2, -0x80000000),
                     datas[2])
    self.assertEqual(audioop.bias(datas[3], 3, 1),
                     packs[3](1, 0x123457, 0x45678a, -0x456788,
                              -0x800000, -0x7fffff, 0))
    self.assertEqual(audioop.bias(datas[3], 3, -1),
                     packs[3](-1, 0x123455, 0x456788, -0x45678a,
                              0x7ffffe, 0x7fffff, -2))
    self.assertEqual(audioop.bias(datas[3], 3, 0x7fffffff),
                     packs[3](-1, 0x123455, 0x456788, -0x45678a,
                              0x7ffffe, 0x7fffff, -2))
    self.assertEqual(audioop.bias(datas[3], 3, -0x80000000),
                     datas[3])
    self.assertEqual(audioop.bias(datas[4], 4, 1),
                     packs[4](1, 0x12345679, 0x456789ac, -0x456789aa,
                              -0x80000000, -0x7fffffff, 0))
    self.assertEqual(audioop.bias(datas[4], 4, -1),
                     packs[4](-1, 0x12345677, 0x456789aa, -0x456789ac,
                              0x7ffffffe, 0x7fffffff, -2))
    self.assertEqual(audioop.bias(datas[4], 4, 0x7fffffff),
                     packs[4](0x7fffffff, -0x6dcba989, -0x3a987656, 0x3a987654,
                              -2, -1, 0x7ffffffe))
    self.assertEqual(audioop.bias(datas[4], 4, -0x80000000),
                     packs[4](-0x80000000, -0x6dcba988, -0x3a987655, 0x3a987655,
                              -1, 0, 0x7fffffff))
def test_adpcm2lin(self):
    self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None),
                     (b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
    self.assertEqual(audioop.adpcm2lin(bytearray(b'\x07\x7f\x7f'), 1, None),
                     (b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
    self.assertEqual(audioop.adpcm2lin(memoryview(b'\x07\x7f\x7f'), 1, None),
                     (b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
    self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 2, None),
                     (packs[2](0, 0xb, 0x29, -0x16, 0x72, -0xb3), (-179, 40)))
    self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 3, None),
                     (packs[3](0, 0xb00, 0x2900, -0x1600, 0x7200,
                               -0xb300), (-179, 40)))
    self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 4, None),
                     (packs[4](0, 0xb0000, 0x290000, -0x160000, 0x720000,
                               -0xb30000), (-179, 40)))

    # Very cursory test
    for w in 1, 2, 3, 4:
        self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None),
                         (b'\0' * w * 10, (0, 0)))
def test_invalid_adpcm_state(self):
    # state must be a tuple or None, not an integer
    self.assertRaises(TypeError, audioop.adpcm2lin, b'\0', 1, 555)
    self.assertRaises(TypeError, audioop.lin2adpcm, b'\0', 1, 555)
    # Issues #24456, #24457: index out of range
    self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0, -1))
    self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0, 89))
    self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0, -1))
    self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0, 89))
    # value out of range
    self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (-0x8001, 0))
    self.assertRaises(ValueError, audioop.adpcm2lin, b'\0', 1, (0x8000, 0))
    self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (-0x8001, 0))
    self.assertRaises(ValueError, audioop.lin2adpcm, b'\0', 1, (0x8000, 0))
def test_alaw2lin(self):
    encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\
              b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff'
    src = [-688, -720, -2240, -4032, -9, -3, -1, -27, -244, -82, -106,
           688, 720, 2240, 4032, 9, 3, 1, 27, 244, 82, 106]
    for w in 1, 2, 3, 4:
        decoded = packs[w](*(x << (w * 8) >> 13 for x in src))
        self.assertEqual(audioop.alaw2lin(encoded, w), decoded)
        self.assertEqual(audioop.alaw2lin(bytearray(encoded), w), decoded)
        self.assertEqual(audioop.alaw2lin(memoryview(encoded), w), decoded)

    encoded = bytes(range(256))
    for w in 2, 3, 4:
        decoded = audioop.alaw2lin(encoded, w)
        self.assertEqual(audioop.lin2alaw(decoded, w), encoded)
def test_ulaw2lin(self):
    encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\
              b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff'
    src = [-8031, -4447, -1471, -495, -163, -53, -18, -6, -2, 0,
           8031, 4447, 1471, 495, 163, 53, 18, 6, 2, 0]
    for w in 1, 2, 3, 4:
        decoded = packs[w](*(x << (w * 8) >> 14 for x in src))
        self.assertEqual(audioop.ulaw2lin(encoded, w), decoded)
        self.assertEqual(audioop.ulaw2lin(bytearray(encoded), w), decoded)
        self.assertEqual(audioop.ulaw2lin(memoryview(encoded), w), decoded)

    # Current u-law implementation has two codes for 0: 0x7f and 0xff.
    encoded = bytes(range(127)) + bytes(range(128, 256))
    for w in 2, 3, 4:
        decoded = audioop.ulaw2lin(encoded, w)
        self.assertEqual(audioop.lin2ulaw(decoded, w), encoded)
def test_ratecv(self):
    for w in 1, 2, 3, 4:
        self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 8000, None),
                         (b'', (-1, ((0, 0),))))
        self.assertEqual(audioop.ratecv(bytearray(), w, 1, 8000, 8000, None),
                         (b'', (-1, ((0, 0),))))
        self.assertEqual(audioop.ratecv(memoryview(b''), w, 1, 8000, 8000, None),
                         (b'', (-1, ((0, 0),))))
        self.assertEqual(audioop.ratecv(b'', w, 5, 8000, 8000, None),
                         (b'', (-1, ((0, 0),) * 5)))
        self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 16000, None),
                         (b'', (-2, ((0, 0),))))
        self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None)[0],
                         datas[w])
        self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 1, 0)[0],
                         datas[w])
    state = None
    d1, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
    d2, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
    self.assertEqual(d1 + d2, b'\000\000\001\001\002\001\000\000\001\001\002')

    for w in 1, 2, 3, 4:
        d0, state0 = audioop.ratecv(datas[w], w, 1, 8000, 16000, None)
        d, state = b'', None
        for i in range(0, len(datas[w]), w):
            d1, state = audioop.ratecv(datas[w][i:i + w], w, 1,
                                       8000, 16000, state)
            d += d1
        self.assertEqual(d, d0)
        self.assertEqual(state, state0)

    expected = {
        1: packs[1](0, 0x0d, 0x37, -0x26, 0x55, -0x4b, -0x14),
        2: packs[2](0, 0x0da7, 0x3777, -0x2630, 0x5673, -0x4a64, -0x129a),
        3: packs[3](0, 0x0da740, 0x377776, -0x262fca,
                    0x56740c, -0x4a62fd, -0x1298c0),
        4: packs[4](0, 0x0da740da, 0x37777776, -0x262fc962,
                    0x56740da6, -0x4a62fc96, -0x1298bf26),
    }
    for w in 1, 2, 3, 4:
        self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 3, 1)[0],
                         expected[w])
        self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 30, 10)[0],
                         expected[w])
def test_tomono(self):
    for w in 1, 2, 3, 4:
        data1 = datas[w]
        data2 = bytearray(2 * len(data1))
        for k in range(w):
            data2[k::2*w] = data1[k::w]
        self.assertEqual(audioop.tomono(data2, w, 1, 0), data1)
        self.assertEqual(audioop.tomono(data2, w, 0, 1), b'\0' * len(data1))
        for k in range(w):
            data2[k+w::2*w] = data1[k::w]
        self.assertEqual(audioop.tomono(data2, w, 0.5, 0.5), data1)
        self.assertEqual(audioop.tomono(bytearray(data2), w, 0.5, 0.5),
                         data1)
        self.assertEqual(audioop.tomono(memoryview(data2), w, 0.5, 0.5),
                         data1)
def test_findfactor(self):
    self.assertEqual(audioop.findfactor(datas[2], datas[2]), 1.0)
    self.assertEqual(audioop.findfactor(bytearray(datas[2]),
                                        bytearray(datas[2])), 1.0)
    self.assertEqual(audioop.findfactor(memoryview(datas[2]),
                                        memoryview(datas[2])), 1.0)
    self.assertEqual(audioop.findfactor(b'\0' * len(datas[2]), datas[2]),
                     0.0)
def test_findmax(self):
    self.assertEqual(audioop.findmax(datas[2], 1), 5)
    self.assertEqual(audioop.findmax(bytearray(datas[2]), 1), 5)
    self.assertEqual(audioop.findmax(memoryview(datas[2]), 1), 5)
def test_byteswap(self):
    swapped_datas = {
        1: datas[1],
        2: packs[2](0, 0x3412, 0x6745, -0x6646, -0x81, 0x80, -1),
        3: packs[3](0, 0x563412, -0x7698bb, 0x7798ba, -0x81, 0x80, -1),
        4: packs[4](0, 0x78563412, -0x547698bb, 0x557698ba,
                    -0x81, 0x80, -1),
    }
    for w in 1, 2, 3, 4:
        self.assertEqual(audioop.byteswap(b'', w), b'')
        self.assertEqual(audioop.byteswap(datas[w], w), swapped_datas[w])
        self.assertEqual(audioop.byteswap(swapped_datas[w], w), datas[w])
        self.assertEqual(audioop.byteswap(bytearray(datas[w]), w),
                         swapped_datas[w])
        self.assertEqual(audioop.byteswap(memoryview(datas[w]), w),
                         swapped_datas[w])
def test_issue7673(self):
    state = None
    for data, size in INVALID_DATA:
        size2 = size
        self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
        self.assertRaises(audioop.error, audioop.max, data, size)
        self.assertRaises(audioop.error, audioop.minmax, data, size)
        self.assertRaises(audioop.error, audioop.avg, data, size)
        self.assertRaises(audioop.error, audioop.rms, data, size)
        self.assertRaises(audioop.error, audioop.avgpp, data, size)
        self.assertRaises(audioop.error, audioop.maxpp, data, size)
        self.assertRaises(audioop.error, audioop.cross, data, size)
        self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
        self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
        self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
        self.assertRaises(audioop.error, audioop.add, data, data, size)
        self.assertRaises(audioop.error, audioop.bias, data, size, 0)
        self.assertRaises(audioop.error, audioop.reverse, data, size)
        self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
        self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
        self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
        self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
        self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
def test_wrongsize(self):
    data = b'abcdefgh'
    state = None
    for size in (-1, 0, 5, 1024):
        self.assertRaises(audioop.error, audioop.ulaw2lin, data, size)
        self.assertRaises(audioop.error, audioop.alaw2lin, data, size)
        self.assertRaises(audioop.error, audioop.adpcm2lin, data, size, state)
def new_module(name):
    """**DEPRECATED**

    Create a new module.

    The module is not entered into sys.modules.
    """
    return types.ModuleType(name)
def get_tag():
    """Return the magic tag for .pyc files."""
    return sys.implementation.cache_tag
def source_from_cache(path):
    """**DEPRECATED**

    Given the path to a .pyc file, return the path to its .py file.

    The .pyc file does not need to exist; this simply returns the path to
    the .py file calculated to correspond to the .pyc file. If path does
    not conform to PEP 3147 format, ValueError will be raised. If
    sys.implementation.cache_tag is None then NotImplementedError is raised.
    """
    return util.source_from_cache(path)
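# Round-trip sketch using the underlying importlib.util helpers; the
# '/pkg/mod.py' path is invented, and the cache tag in the intermediate
# .pyc path depends on the running interpreter.
import importlib.util

pyc = importlib.util.cache_from_source('/pkg/mod.py')
assert importlib.util.source_from_cache(pyc) == '/pkg/mod.py'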
def __init__(self, path):
    if path == '':
        raise ImportError('empty pathname', path='')
    elif os.path.isdir(path):
        raise ImportError('existing directory', path=path)
def __init__(self, fullname, path, file=None):
    super().__init__(fullname, path)
    self.file = file
def load_source(name, pathname, file=None):
    loader = _LoadSourceCompatibility(name, pathname, file)
    spec = util.spec_from_file_location(name, pathname, loader=loader)
    if name in sys.modules:
        module = _exec(spec, sys.modules[name])
    else:
        module = _load(spec)
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = machinery.SourceFileLoader(name, pathname)
    module.__spec__.loader = module.__loader__
    return module
def load_compiled(name, pathname, file=None):
    """**DEPRECATED**"""
    loader = _LoadCompiledCompatibility(name, pathname, file)
    spec = util.spec_from_file_location(name, pathname, loader=loader)
    if name in sys.modules:
        module = _exec(spec, sys.modules[name])
    else:
        module = _load(spec)
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = SourcelessFileLoader(name, pathname)
    module.__spec__.loader = module.__loader__
    return module
def load_module(name, file, filename, details):
    """**DEPRECATED**

    Load a module, given information returned by find_module().

    The module name must include the full package name, if any.
    """
    suffix, mode, type_ = details
    if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
        raise ValueError('invalid file open mode {!r}'.format(mode))
    elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
        msg = 'file object required for import (type code {})'.format(type_)
        raise ValueError(msg)
    elif type_ == PY_SOURCE:
        return load_source(name, filename, file)
    elif type_ == PY_COMPILED:
        return load_compiled(name, filename, file)
    elif type_ == C_EXTENSION and load_dynamic is not None:
        if file is None:
            with open(filename, 'rb') as opened_file:
                return load_dynamic(name, filename, opened_file)
        else:
            return load_dynamic(name, filename, file)
    elif type_ == PKG_DIRECTORY:
        return load_package(name, filename)
    elif type_ == C_BUILTIN:
        return init_builtin(name)
    elif type_ == PY_FROZEN:
        return init_frozen(name)
    else:
        msg = "Don't know how to import {} (type code {})".format(name, type_)
        raise ImportError(msg, name=name)
def reload(module):
    """**DEPRECATED**

    Reload the module and return it.

    The module must have been successfully imported before.
    """
    return importlib.reload(module)
def load_dynamic(name, path, file=None):
    """**DEPRECATED**

    Load an extension module.
    """
    import importlib.machinery
    loader = importlib.machinery.ExtensionFileLoader(name, path)

    # Issue #24748: Skip the sys.modules check in _load_module_shim;
    # always load new extension
    spec = importlib.machinery.ModuleSpec(
        name=name, loader=loader, origin=path)
    return _load(spec)
# Source: pygeo/pycmbs
def setUp(self):
    # requires local installation of ICON sample files!
    self.gridfile = '../../example_data/icon/r2b4_amip.nc'
    self.datafile = '../../example_data/icon/rms0006_atm_phy_DOM01_ML_0001.nc'
def test_IconInit(self):
    x = Icon(None, None, 'None')
def test_IconInitMissingGridFile(self):
    x = Icon(self.datafile, 'nothing.nc', 'novar')
    with self.assertRaises(ValueError):
        x.read()
# Source: the-blue-alliance/the-blue-alliance
def handle_bad_input(e: Exception) -> Response:
    return make_response(profiled_jsonify({"Error": f"{e}"}), 400)
# Source: plotly/plotly.py
def __init__(
    self, plotly_name="showticklabels", parent_name="heatmap.colorbar", **kwargs
# Source: ActiveState/code
def get_ip_address(ifname):
    # Query the kernel for the IPv4 address bound to a network interface
    # (Linux-only; 0x8915 is the SIOCGIFADDR ioctl). On Python 3, `ifname`
    # must be a bytes object for struct.pack('256s', ...).
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15])
    )[20:24])
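# Usage sketch for the recipe above; 'eth0' is an assumed interface name,
# and the bytes literal is what Python 3 requires.
if __name__ == '__main__':
    print(get_ip_address(b'eth0'))  # e.g. '192.168.0.7'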
# Source: beetbox/beets
def __init__(self, reason, verb, tb=None):
    self.reason = reason
    self.verb = verb
    self.tb = tb
    super().__init__(self.get_message())
def _reasonstr(self):
    """Get the reason as a string."""
    if isinstance(self.reason, str):
        return self.reason
    elif isinstance(self.reason, bytes):
        return self.reason.decode('utf-8', 'ignore')
    elif hasattr(self.reason, 'strerror'):  # i.e., EnvironmentError
        return self.reason.strerror
    else:
        return '"{}"'.format(str(self.reason))
def log(self, logger):
    """Log to the provided `logger` a human-readable message as an
    error and a verbose traceback as a debug message.
    """
    if self.tb:
        logger.debug(self.tb)
    logger.error('{0}: {1}', self.error_kind, self.args[0])
def __init__(self, reason, verb, paths, tb=None):
    self.paths = paths
    super().__init__(reason, verb, tb)
def normpath(path):
    """Provide the canonical form of the path suitable for storing in
    the database.
    """
    path = syspath(path, prefix=False)
    path = os.path.normpath(os.path.abspath(os.path.expanduser(path)))
    return bytestring_path(path)
def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
    """Like `os.walk`, but yields things in case-insensitive sorted,
    breadth-first order. Directory and file names matching any glob
    pattern in `ignore` are skipped. If `logger` is provided, then
    warning messages are logged there when a directory cannot be listed.
    """
    # Make sure the paths aren't Unicode strings.
    path = bytestring_path(path)
    ignore = [bytestring_path(i) for i in ignore]

    # Get all the directories and files at this level.
    try:
        contents = os.listdir(syspath(path))
    except OSError as exc:
        if logger:
            logger.warning('could not list directory {}: {}'.format(
                displayable_path(path), exc.strerror
            ))
        return
    dirs = []
    files = []
    for base in contents:
        base = bytestring_path(base)

        # Skip ignored filenames.
        skip = False
        for pat in ignore:
            if fnmatch.fnmatch(base, pat):
                if logger:
                    logger.debug('ignoring {} due to ignore rule {}'.format(
                        base, pat
                    ))
                skip = True
                break
        if skip:
            continue

        # Add to output as either a file or a directory.
        cur = os.path.join(path, base)
        if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
            if os.path.isdir(syspath(cur)):
                dirs.append(base)
            else:
                files.append(base)

    # Sort lists (case-insensitive) and yield the current level.
    dirs.sort(key=bytes.lower)
    files.sort(key=bytes.lower)
    yield (path, dirs, files)

    # Recurse into directories.
    for base in dirs:
        cur = os.path.join(path, base)
        # yield from sorted_walk(...)
        yield from sorted_walk(cur, ignore, ignore_hidden, logger)
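# Usage sketch for sorted_walk(); beets paths are bytes, and the directory
# name and ignore glob here are illustrative.
import os

for dirpath, dirnames, filenames in sorted_walk(b'music', ignore=(b'*.tmp',)):
    for name in filenames:
        print(os.path.join(dirpath, name))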
def mkdirall(path):
    """Make all the enclosing directories of path (like mkdir -p on the
    parent).
    """
    for ancestor in ancestry(path):
        if not os.path.isdir(syspath(ancestor)):
            try:
                os.mkdir(syspath(ancestor))
            except OSError as exc:
                raise FilesystemError(exc, 'create', (ancestor,),
                                      traceback.format_exc())
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
    """If path is an empty directory, then remove it. Recursively remove
    path's ancestry up to root (which is never removed) where there are
    empty directories. If path is not contained in root, then nothing is
    removed. Glob patterns in clutter are ignored when determining
    emptiness. If root is not provided, then only path may be removed
    (i.e., no recursive removal).
    """
    path = normpath(path)
    if root is not None:
        root = normpath(root)

    ancestors = ancestry(path)
    if root is None:
        # Only remove the top directory.
        ancestors = []
    elif root in ancestors:
        # Only remove directories below the root.
        ancestors = ancestors[ancestors.index(root) + 1:]
    else:
        # Remove nothing.
        return

    # Traverse upward from path.
    ancestors.append(path)
    ancestors.reverse()
    for directory in ancestors:
        directory = syspath(directory)
        if not os.path.exists(directory):
            # Directory gone already.
            continue
        clutter = [bytestring_path(c) for c in clutter]
        match_paths = [bytestring_path(d) for d in os.listdir(directory)]
        try:
            if fnmatch_all(match_paths, clutter):
                # Directory contains only clutter (or nothing).
                shutil.rmtree(directory)
            else:
                break
        except OSError:
            break
def arg_encoding():
    """Get the encoding for command-line arguments (and other OS
    locale-sensitive strings).
    """
    try:
        return locale.getdefaultlocale()[1] or 'utf-8'
    except ValueError:
        # Invalid locale environment variable setting. To avoid
        # failing entirely for no good reason, assume UTF-8.
        return 'utf-8'
def bytestring_path(path):
    """Given a path, which is either a bytes or a unicode, returns a
    bytes path (ensuring that we never deal with Unicode pathnames).
    """
    # Pass through bytestrings.
    if isinstance(path, bytes):
        return path

    # On Windows, remove the magic prefix added by `syspath`. This makes
    # ``bytestring_path(syspath(X)) == X``, i.e., we can safely
    # round-trip through `syspath`.
    if os.path.__name__ == 'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
        path = path[len(WINDOWS_MAGIC_PREFIX):]

    # Try to encode with default encodings, but fall back to utf-8.
    try:
        return path.encode(_fsencoding())
    except (UnicodeError, LookupError):
        return path.encode('utf-8')
def displayable_path(path, separator='; '):
    """Attempts to decode a bytestring path to a unicode object for the
    purpose of displaying it to the user. If the `path` argument is a
    list or a tuple, the elements are joined with `separator`.
    """
    if isinstance(path, (list, tuple)):
        return separator.join(displayable_path(p) for p in path)
    elif isinstance(path, str):
        return path
    elif not isinstance(path, bytes):
        # A non-string object: just get its unicode representation.
        return str(path)

    try:
        return path.decode(_fsencoding(), 'ignore')
    except (UnicodeError, LookupError):
        return path.decode('utf-8', 'ignore')
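# For example, assuming an ASCII-compatible filesystem encoding: bytes
# paths are decoded, and lists are joined with the separator.
assert displayable_path([b'/a.mp3', b'/b.mp3']) == '/a.mp3; /b.mp3'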
def samefile(p1, p2):
    """Safer equality for paths."""
    if p1 == p2:
        return True
    return shutil._samefile(syspath(p1), syspath(p2))
def copy(path, dest, replace=False):
    """Copy a plain file. Permissions are not copied. If `dest` already
    exists, raises a FilesystemError unless `replace` is True. Has no
    effect if `path` is the same as `dest`. Paths are translated to
    system paths before the syscall.
    """
    if samefile(path, dest):
        return
    path = syspath(path)
    dest = syspath(dest)
    if not replace and os.path.exists(dest):
        raise FilesystemError('file exists', 'copy', (path, dest))
    try:
        shutil.copyfile(path, dest)
    except OSError as exc:
        raise FilesystemError(exc, 'copy', (path, dest),
                              traceback.format_exc())
def link(path, dest, replace=False):
    """Create a symbolic link from path to `dest`. Raises an OSError if
    `dest` already exists, unless `replace` is True. Does nothing if
    `path` == `dest`.
    """
    if samefile(path, dest):
        return

    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError('file exists', 'rename', (path, dest))
    try:
        os.symlink(syspath(path), syspath(dest))
    except NotImplementedError:
        # raised on python >= 3.2 and Windows versions before Vista
        raise FilesystemError('OS does not support symbolic links.',
                              'link', (path, dest), traceback.format_exc())
    except OSError as exc:
        # TODO: Windows version checks can be removed for python 3
        if hasattr(sys, 'getwindowsversion'):
            if sys.getwindowsversion()[0] < 6:  # is before Vista
                exc = 'OS does not support symbolic links.'
        raise FilesystemError(exc, 'link', (path, dest),
                              traceback.format_exc())
def reflink(path, dest, replace=False, fallback=False):
"""Create a reflink from `dest` to `path`.
Raise an `OSError` if `dest` already exists, unless `replace` is
True. If `path` == `dest`, then do nothing.
If reflinking fails and `fallback` is enabled, try copying the file
instead. Otherwise, raise an error without trying a plain copy.
May raise an `ImportError` if the `reflink` module is not available.
"""
import reflink as pyreflink
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'reflink', (path, dest))
try:
pyreflink.reflink(path, dest)
except (NotImplementedError, pyreflink.ReflinkImpossibleError):
if fallback:
copy(path, dest, replace)
else:
raise FilesystemError('OS/filesystem does not support reflinks.',
'link', (path, dest), traceback.format_exc()) | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
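A brief sketch of the fallback path (illustrative file names, so the source file is assumed to exist; requires the third-party reflink package and assumes the helper is importable from beets.util):

from beets.util import reflink

# On filesystems without reflink support (e.g. most ext4 volumes),
# fallback=True silently degrades to a plain copy instead of raising.
reflink('/tmp/src.flac', '/tmp/dst.flac', fallback=True)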
def sanitize_path(path, replacements=None):
"""Takes a path (as a Unicode string) and makes sure that it is
legal. Returns a new path. Only works with fragments; won't work
reliably on Windows when a path begins with a drive letter. Path
separators (including altsep!) should already be cleaned from the
path components. If replacements is specified, it is used *instead*
of the default set of replacements; it must be a list of (compiled
regex, replacement string) pairs.
"""
replacements = replacements or CHAR_REPLACE
comps = components(path)
if not comps:
return ''
for i, comp in enumerate(comps):
for regex, repl in replacements:
comp = regex.sub(repl, comp)
comps[i] = comp
return os.path.join(*comps) | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
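A sketch with a custom replacement table on POSIX (assuming sanitize_path is importable from beets.util); each pair is a compiled regex plus its replacement string, applied per path component:

import re

from beets.util import sanitize_path

replacements = [(re.compile(r'[\\?*<>|:"]'), '_')]
print(sanitize_path('AC/DC: Back in Black?', replacements))
# -> 'AC/DC_ Back in Black_' (the '/' splits components first)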
def _legalize_stage(path, replacements, length, extension, fragment):
"""Perform a single round of path legalization steps
(sanitization/replacement, encoding from Unicode to bytes,
extension-appending, and truncation). Return the path (Unicode if
`fragment` is set, `bytes` otherwise) and whether truncation was
required.
"""
# Perform an initial sanitization including user replacements.
path = sanitize_path(path, replacements)
# Encode for the filesystem.
if not fragment:
path = bytestring_path(path)
# Preserve extension.
path += extension.lower()
# Truncate too-long components.
pre_truncate_path = path
path = truncate_path(path, length)
return path, path != pre_truncate_path | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
def py3_path(path):
"""Convert a bytestring path to Unicode on Python 3 only. On Python
2, return the bytestring path unchanged.
This helps deal with APIs on Python 3 that *only* accept Unicode
(i.e., `str` objects). I philosophically disagree with this
decision, because paths are sadly bytes on Unix, but that's the way
it is. So this function helps us "smuggle" the true bytes data
through APIs that took Python 3's Unicode mandate too seriously.
"""
if isinstance(path, str):
return path
assert isinstance(path, bytes)
return os.fsdecode(path) | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
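A two-line sketch of the conversion (assuming py3_path is importable from beets.util and an ASCII-compatible filesystem encoding):

from beets.util import py3_path

assert py3_path(b'/music/track.mp3') == '/music/track.mp3'  # bytes -> str
assert py3_path('/music/track.mp3') == '/music/track.mp3'   # str unchanged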
def as_string(value):
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if value is None:
return ''
elif isinstance(value, memoryview):
return bytes(value).decode('utf-8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf-8', 'ignore')
else:
return str(value) | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
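A sketch exercising each branch (assuming as_string is importable from beets.util):

from beets.util import as_string

assert as_string(None) == ''
assert as_string(b'Beyonc\xc3\xa9') == 'Beyoncé'   # UTF-8 decoded
assert as_string(memoryview(b'abc')) == 'abc'
assert as_string(7) == '7'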
def plurality(objs):
"""Given a sequence of hashble objects, returns the object that
is most common in the set and the its number of appearance. The
sequence must contain at least one object.
"""
c = Counter(objs)
if not c:
raise ValueError('sequence must be non-empty')
return c.most_common(1)[0] | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
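A one-liner sketch (assuming plurality is importable from beets.util):

from beets.util import plurality

obj, count = plurality(['mp3', 'flac', 'mp3', 'mp3'])
assert (obj, count) == ('mp3', 3)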
def convert_command_args(args):
"""Convert command arguments to bytestrings on Python 2 and
surrogate-escaped strings on Python 3."""
assert isinstance(args, list)
def convert(arg):
if isinstance(arg, bytes):
arg = arg.decode(arg_encoding(), 'surrogateescape')
return arg
return [convert(a) for a in args] | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
def command_output(cmd, shell=False):
"""Runs the command and returns its output after it has exited.
Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain
byte strings of the respective output streams.
``cmd`` is a list of arguments starting with the command name. The
arguments are bytes on Unix and strings on Windows.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute.
If the process exits with a non-zero return code
``subprocess.CalledProcessError`` is raised. May also raise
``OSError``.
This replaces `subprocess.check_output` which can have problems if lots of
output is sent to stderr.
"""
cmd = convert_command_args(cmd)
try: # python >= 3.3
devnull = subprocess.DEVNULL
except AttributeError:
devnull = open(os.devnull, 'r+b')
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=devnull,
close_fds=platform.system() != 'Windows',
shell=shell
)
stdout, stderr = proc.communicate()
if proc.returncode:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=' '.join(cmd),
output=stdout + stderr,
)
return CommandOutput(stdout, stderr) | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
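A POSIX-only sketch of both the success and failure paths (assuming command_output is importable from beets.util; echo and false are standard Unix commands):

import subprocess

from beets.util import command_output

out = command_output(['echo', 'hello'])
assert out.stdout.strip() == b'hello'

try:
    command_output(['false'])        # exits non-zero
except subprocess.CalledProcessError as exc:
    assert exc.returncode == 1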
def open_anything():
"""Return the system command that dispatches execution to the correct
program.
"""
sys_name = platform.system()
if sys_name == 'Darwin':
base_cmd = 'open'
elif sys_name == 'Windows':
base_cmd = 'start'
else: # Assume Unix
base_cmd = 'xdg-open'
return base_cmd | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
def interactive_open(targets, command):
"""Open the files in `targets` by `exec`ing a new `command`, given
as a Unicode string. (The new program takes over, and Python
execution ends: this does not fork a subprocess.)
Can raise `OSError`.
"""
assert command
# Split the command string into its arguments.
try:
args = shlex.split(command)
except ValueError: # Malformed shell tokens.
args = [command]
args.insert(0, args[0]) # for argv[0]
args += targets
return os.execlp(*args) | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
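A hedged sketch tying the two helpers together; note that interactive_open exec()s over the current process and never returns, so the call is left commented out:

from beets.util import interactive_open, open_anything

opener = open_anything()  # 'open', 'start', or 'xdg-open' by platform
# interactive_open([b'/music/album'], opener)  # would replace this process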
def case_sensitive(path):
"""Check whether the filesystem at the given path is case sensitive.
To work best, the path should point to a file or a directory. If the path
does not exist, assume a case sensitive file system on every platform
except Windows.
"""
# A fallback in case the path does not exist.
if not os.path.exists(syspath(path)):
# By default, the case sensitivity depends on the platform.
return platform.system() != 'Windows'
# If an upper-case version of the path exists but a lower-case
# version does not, then the filesystem must be case-sensitive.
# (Otherwise, we have more work to do.)
if not (os.path.exists(syspath(path.lower())) and
os.path.exists(syspath(path.upper()))):
return True
# Both versions of the path exist on the file system. Check whether
# they refer to different files by their inodes. Alas,
# `os.path.samefile` is only available on Unix systems on Python 2.
if platform.system() != 'Windows':
return not os.path.samefile(syspath(path.lower()),
syspath(path.upper()))
# On Windows, we check whether the canonical, long filenames for the
# files are the same.
lower = _windows_long_path_name(path.lower())
upper = _windows_long_path_name(path.upper())
return lower != upper | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
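A sketch probing the temp directory's filesystem (assuming case_sensitive is importable from beets.util):

import tempfile

from beets.util import case_sensitive

with tempfile.NamedTemporaryFile() as f:
    print(case_sensitive(f.name))
# True on typical Linux filesystems; False on default macOS/Windows volumes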
def asciify_path(path, sep_replace):
"""Decodes all unicode characters in a path into ASCII equivalents.
Substitutions are provided by the unidecode module. Path separators in the
input are preserved.
Keyword arguments:
path -- The path to be asciified.
sep_replace -- the string to be used to replace extraneous path separators.
"""
# if this platform has an os.altsep, change it to os.sep.
if os.altsep:
path = path.replace(os.altsep, os.sep)
path_components = path.split(os.sep)
for index, item in enumerate(path_components):
item = unidecode(item).replace(os.sep, sep_replace)
if os.altsep:
# Also replace any altsep that unidecode itself produced,
# without discarding the os.sep replacement above.
item = item.replace(os.altsep, sep_replace)
path_components[index] = item
return os.sep.join(path_components) | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
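A sketch of the transliteration (requires the unidecode package; assumes asciify_path is importable from beets.util):

from beets.util import asciify_path

print(asciify_path('Música/Señor Coconut', '_'))
# -> 'Musica/Senor Coconut' on POSIX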
def lazy_property(func):
"""A decorator that creates a lazily evaluated property. On first access,
the property is assigned the return value of `func`. This first value is
stored, so that future accesses do not have to evaluate `func` again.
This behaviour is useful when `func` is expensive to evaluate, and it is
not certain that the result will be needed.
"""
field_name = '_' + func.__name__
@property
@functools.wraps(func)
def wrapper(self):
if hasattr(self, field_name):
return getattr(self, field_name)
value = func(self)
setattr(self, field_name, value)
return value
return wrapper | beetbox/beets | [
11484,
1774,
11484,
509,
1281395840
] |
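A small sketch of the caching behavior (assuming lazy_property is importable from beets.util; Track and duration are made-up names for illustration):

from beets.util import lazy_property

class Track:
    @lazy_property
    def duration(self):
        print('computing...')  # runs only on first access
        return 242.0

t = Track()
assert t.duration == 242.0  # prints 'computing...'
assert t.duration == 242.0  # cached: no second print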
def __init__(
self, plotly_name="familysrc", parent_name="barpolar.hoverlabel.font", **kwargs | plotly/python-api | [
13052,
2308,
13052,
1319,
1385013188
] |