function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def read_and_pre_process_xml(file_name):
    """Return the contents of an XML file with newlines stripped out.

    Stripping the line-return characters ("\n") flattens the document to a
    single line so it can be handed to the downstream parser.

    Arguments:
        file_name (str): Name of the XML file.

    Returns:
        str: Pre-processed contents of the file.
    """
    with open(file_name, 'r') as xml_file:
        contents = xml_file.read()
    return contents.replace('\n', '')
edx-solutions/edx-platform
[ 12, 19, 12, 9, 1391522577 ]
def Args(parser):
  """Registers this command's flags on *parser*.

  Removes the inherited URI flag (this command emits no resource URIs) and
  adds an optional positional JSON-FILE argument that defaults to stdin.
  """
  base.URI_FLAG.RemoveFromParser(parser)
  parser.add_argument(
      'json_file',
      metavar='JSON-FILE',
      nargs='?',
      default=None,
      help=('A file containing JSON data for a single resource or a list of'
            ' resources of the same type. If omitted then the standard input'
            ' is read.'))
KaranToor/MA450
[ 1, 1, 1, 4, 1484697944 ]
def GetUriCacheUpdateOp():
  """Returns the URI-cache update operation.

  This command produces no resource URIs, so there is nothing to cache.
  """
  return None
KaranToor/MA450
[ 1, 1, 1, 4, 1484697944 ]
def get_device():
  """Returns (and caches) the best available logical device.

  Scans device types from highest to lowest precedence and memoizes the
  first logical device found in the module-level `_cached_device`.

  Raises:
    ValueError: if no device of any known type is available.
  """
  global _cached_device
  if _cached_device is not None:
    return _cached_device
  # Precedence from high to low
  for device_type in ('XLA_GPU', 'GPU', 'XLA_CPU', 'CPU'):
    devices = config.list_logical_devices(device_type)
    if devices:
      _cached_device = devices[0]
      return _cached_device
  raise ValueError('Cannot find any suitable device. Available devices: %s' %
                   config.list_logical_devices())
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def invert_philox(key, value):
  """Inverts the Philox-4x32 bijection.

  Args:
    key: two 32-bit integers (the Philox key).
    value: four 32-bit integers (a Philox output block).

  Returns:
    np.ndarray of the four words that Philox maps to `value` under `key`.
  """
  key = np.array(key, dtype=np.uint32)
  value = np.array(value, dtype=np.uint32)
  # Per-round key increment ("golden ratio" constants used by Philox).
  step = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
  # Undo the ten rounds in reverse order.
  for round_index in reversed(range(10)):
    key0, key1 = key + round_index * step
    # The round multipliers 0x991a7cdb / 0x6d7cae67 are the modular inverses
    # of Philox's 0xD2511F53 / 0xCD9E8D57 (mod 2**32).
    v0 = value[3] * 0x991a7cdb & 0xffffffff
    v2 = value[1] * 0x6d7cae67 & 0xffffffff
    hi0 = v0 * 0xD2511F53 >> 32
    hi1 = v2 * 0xCD9E8D57 >> 32
    v1 = hi1 ^ value[0] ^ key0
    v3 = hi0 ^ value[2] ^ key1
    value = v0, v1, v2, v3
  return np.array(value)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def float_cases(shape_dtypes=(None,)):
  """Yields (name, stateless_fn, stateful_fn) cases for float distributions.

  Covers uniform, normal and truncated-normal samplers across several float
  dtypes and shapes; each yielded pair of partials takes only a `seed`.
  """
  cases = (
      # Uniform distribution, with and without range
      ('uniform', stateless.stateless_random_uniform,
       random_ops.random_uniform, {}),
      ('uniform2', stateless.stateless_random_uniform,
       random_ops.random_uniform, dict(minval=2.2, maxval=7.1)),
      # Normal distribution, with and without mean+stddev
      ('normal', stateless.stateless_random_normal,
       random_ops.random_normal, {}),
      ('normal2', stateless.stateless_random_normal,
       random_ops.random_normal, dict(mean=2, stddev=3)),
      # Truncated normal distribution, with and without mean+stddev
      ('trnorm', stateless.stateless_truncated_normal,
       random_ops.truncated_normal, {}),
      ('trnorm2', stateless.stateless_truncated_normal,
       random_ops.truncated_normal, dict(mean=3, stddev=4)),
  )

  # Explicitly passing in params because capturing cell variable from loop is
  # problematic in Python
  def wrap(op, dtype, shape, shape_dtype, seed, **kwargs):
    device_type = get_device().device_type
    # Some dtypes are not supported on some devices
    if (dtype == dtypes.float16 and device_type in ('XLA_GPU', 'XLA_CPU') or
        dtype == dtypes.bfloat16 and device_type == 'GPU'):
      dtype = dtypes.float32
    shape_ = (constant_op.constant(shape, dtype=shape_dtype)
              if shape_dtype is not None else shape)
    return op(seed=seed, shape=shape_, dtype=dtype, **kwargs)

  def _name(a):
    # Uses the object's `.name` when present (dtypes); falls back to repr-able
    # values such as tuples and None.
    if hasattr(a, 'name'):
      return a.name
    else:
      return a

  for dtype in dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64:
    for shape_dtype in shape_dtypes:
      for shape in (), (3,), (2, 5):
        for name, stateless_op, stateful_op, kwargs in cases:
          yield (('%s_%s_%s_%s' % (name, _name(dtype), shape,
                                   _name(shape_dtype))).replace(' ', ''),
                 functools.partial(wrap, stateless_op, dtype, shape,
                                   shape_dtype, **kwargs),
                 functools.partial(wrap, stateful_op, dtype, shape,
                                   shape_dtype, **kwargs))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def wrap(op, minval, maxval, shape, shape_dtype, dtype, seed, **kwargs):
  """Invokes a uniform-style sampler *op* with a normalized keyword interface.

  The shape is passed through unchanged when `shape_dtype` is None; otherwise
  it is first wrapped in a constant tensor of that dtype.
  """
  if shape_dtype is None:
    shape_ = shape
  else:
    shape_ = constant_op.constant(shape, dtype=shape_dtype)
  return op(
      seed=seed,
      shape=shape_,
      minval=minval,
      maxval=maxval,
      dtype=dtype,
      **kwargs)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def multinomial_cases():
  """Yields (name, stateless_fn, stateful_fn) cases for multinomial sampling.

  Varies logits dtype, output dtype and logits shape; each yielded pair of
  partials takes only a `seed`.
  """
  num_samples = 10

  def wrap(op, logits, logits_dtype, output_dtype, seed):
    return op(seed=seed,
              logits=constant_op.constant(logits, dtype=logits_dtype),
              num_samples=num_samples, output_dtype=output_dtype)

  for logits_dtype in np.float16, np.float32, np.float64:
    for output_dtype in dtypes.int32, dtypes.int64:
      for logits in ([[0.1, 0.25, 0.5, 0.15]],
                     [[0.5, 0.5], [0.8, 0.2], [0.25, 0.75]]):
        yield ('multinomial',
               functools.partial(wrap, stateless.stateless_multinomial,
                                 logits, logits_dtype, output_dtype),
               functools.partial(wrap, random_ops.multinomial, logits,
                                 logits_dtype, output_dtype))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def wrap(op, alpha, dtype, shape, seed):
  """Invokes a gamma-style sampler *op*, wrapping `alpha` in a constant tensor."""
  alpha_t = constant_op.constant(alpha, dtype=dtype)
  return op(seed=seed, shape=shape, alpha=alpha_t, dtype=dtype)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def poisson_cases():
  """Yields (name, stateless_fn, stateful_fn) cases for Poisson sampling.

  Varies the rate (lam) dtype, output dtype and rate shape; the stateless
  variant is given an explicit output shape of (10,) + shape(lam).
  """
  def wrap(op, lam, lam_dtype, out_dtype, shape, seed):
    return op(seed=seed, shape=shape,
              lam=constant_op.constant(lam_dtype(lam), dtype=lam_dtype),
              dtype=out_dtype)

  for lam_dtype in np.float16, np.float32, np.float64, np.int32, np.int64:
    for out_dtype in np.float16, np.float32, np.float64, np.int32, np.int64:
      for lam in ([[5.5, 1., 2.]], [[7.5, 10.5], [3.8, 8.2], [1.25, 9.75]]):
        yield ('poisson',
               functools.partial(wrap, stateless.stateless_random_poisson,
                                 lam, lam_dtype, out_dtype,
                                 (10,) + tuple(np.shape(lam))),
               functools.partial(wrap, random_ops.random_poisson, lam,
                                 lam_dtype, out_dtype, (10,)))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _test_match(self, case, seed):
  """Asserts a stateless op equals its stateful twin for one scrambled seed."""
  # Stateless ops should be the same as stateful ops on the first call
  # after seed scrambling.
  key = 0x3ec8f720, 0x02461e29
  # Invert the Philox scrambling so the stateless op receives the preimage
  # of the seed the stateful op will consume.
  preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
  # Pack pairs of 32-bit words into 64-bit seed values.
  preseed = preseed[::2] | preseed[1::2] << 32
  with ops.device(get_device().name):
    _, stateless_op, stateful_op = case
    random_seed.set_random_seed(seed[0])
    stateful = stateful_op(seed=seed[1])
    pure = stateless_op(seed=preseed)
    self.assertAllEqual(stateful, pure)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _test_old_and_new_stateless_match(self, case, seed):
  """Tests that the new stateless ops match the old stateless ones."""
  with ops.device(get_device().name):
    _, stateless_op, _ = case
    # Run the same op on both sides of the forward-compatibility horizon.
    with compat.forward_compatibility_horizon(*BEFORE_EXPIRE):
      old = stateless_op(seed=seed)
    with compat.forward_compatibility_horizon(*AFTER_EXPIRE):
      new = stateless_op(seed=seed)
    self.assertAllClose(old, new)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _test_determinism(self, case, seed_type):
  """Asserts stateless outputs are equal iff their seeds are equal."""
  # Stateless values should be equal iff the seeds are equal (roughly)
  seeds = [(x, y) for x in range(5) for y in range(5)] * 3  # pylint: disable=g-complex-comprehension
  with self.test_session(), ops.device(get_device().name):
    _, stateless_op, _ = case
    if context.executing_eagerly():
      values = [
          (seed, stateless_op(seed=constant_op.constant(seed, seed_type)))
          for seed in seeds]
    else:
      # Have this branch because the above branch is too slow in graph
      # mode
      seed_t = array_ops.placeholder(seed_type, shape=[2])
      pure = stateless_op(seed=seed_t)
      values = [
          (seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds
      ]
    # Compare every pair of outputs against the equality of their seeds.
    for s0, v0 in values:
      for s1, v1 in values:
        if dtypes.as_dtype(v0.dtype) != dtypes.bfloat16:
          self.assertEqual(s0 == s1, np.all(v0 == v1))
        elif s0 == s1:
          # Skip the s0 != s1 case because v0 and v1 can be either equal or
          # unequal in that case due to bfloat16's low precision
          self.assertAllEqual(v0, v1)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testMatchFloat(self, case, seed):
  """Checks stateless float samplers match their stateful twins."""
  if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Skip on XLA because XLA kernels do not support int64 '
                  'seeds needed by this test.')
  self._test_match(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testMatchInt(self, case, seed):
  """Checks stateless integer samplers match their stateful twins."""
  if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Skip on XLA because XLA kernels do not support int64 '
                  'seeds needed by this test.')
  self._test_match(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testMatchMultinomial(self, case, seed):
  """Checks stateless multinomial sampling matches its stateful twin."""
  if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking XLA kernel')
  self._test_match(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testMatchGamma(self, case, seed):
  """Checks stateless gamma sampling matches its stateful twin (CPU only)."""
  if get_device().device_type == 'GPU':
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking GPU kernel')
  if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking XLA kernel')
  self._test_match(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testStatelessGammaCpuGpuMatch(self, case, seed):
  """Checks the CPU and GPU stateless gamma kernels agree (GPU only)."""
  if get_device().device_type != 'GPU':
    # This test compares the numbers produced by the CPU and GPU kernel for
    # stateless_random_gamma.
    self.skipTest('This test requires GPU')
  self._test_match_stateless_cpu_gpu(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testMatchPoisson(self, case, seed):
  """Checks stateless Poisson sampling matches its stateful twin (CPU only)."""
  if get_device().device_type == 'GPU':
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking GPU kernel')
  if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking XLA kernel')
  self._test_match(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testOldAndNewStatelessMatchFloat(self, case, seed):
  """Checks old and new stateless float ops agree across the compat horizon."""
  self._test_old_and_new_stateless_match(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testOldAndNewStatelessMatchInt(self, case, seed):
  """Checks old and new stateless int ops agree across the compat horizon."""
  self._test_old_and_new_stateless_match(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testExplicitAlgFloat(self, case):
  """Checks float sampling with an explicitly chosen RNG algorithm."""
  seed = (7, 17)  # arbitrary fixed seed
  self._test_explicit_alg(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testExplicitAlgInt(self, case):
  """Checks int sampling with an explicitly chosen RNG algorithm."""
  seed = (7, 17)  # arbitrary fixed seed
  self._test_explicit_alg(case, seed)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDeterminismFloat(self, case, seed_type):
  """Checks float stateless outputs are determined entirely by the seed."""
  if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',
                                                                'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest(
        'Skip on XLA because XLA kernels do not support int64 seeds.')
  self._test_determinism(case, seed_type)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDeterminismInt(self, case, seed_type):
  """Checks int stateless outputs are determined entirely by the seed."""
  if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',
                                                                'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest(
        'Skip on XLA because XLA kernels do not support int64 seeds.')
  self._test_determinism(case, seed_type)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDeterminismMultinomial(self, case, seed_type):
  """Checks stateless multinomial outputs are determined by the seed."""
  if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking XLA kernel')
  self._test_determinism(case, seed_type)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDeterminismGamma(self, case, seed_type):
  """Checks stateless gamma outputs are determined by the seed."""
  if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking XLA kernel')
  self._test_determinism(case, seed_type)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testDeterminismPoisson(self, case, seed_type):
  """Checks stateless Poisson outputs are determined by the seed (CPU only)."""
  if get_device().device_type == 'GPU':
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking GPU kernel')
  if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
    # This test was passing before because soft placement silently picked the
    # CPU kernels.
    self.skipTest('Lacking XLA kernel')
  self._test_determinism(case, seed_type)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testGetKeyCounterAlg(self):
  """Checks shapes of the derived key, counter and algorithm tensors."""
  seed = [1, 2]
  key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(
      seed)
  self.assertAllEqual(key.shape, [1])
  self.assertAllEqual(counter.shape, [2])
  alg = gen_stateless_random_ops_v2.stateless_random_get_alg()
  self.assertAllEqual(alg.shape, [])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def assertNoEqualPair(self, ls):
  """Asserts no two tensors in *ls* are elementwise-equal everywhere."""
  for i in range(len(ls)):
    for j in range(i + 1, len(ls)):
      self.assertFalse(math_ops.reduce_all(ls[i] == ls[j]))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testSplit(self, dtype):
  """Test for `split`."""
  seed = constant_op.constant([1, 2], dtype=dtype)
  new_seed = stateless.split(seed, 3)
  # split should produce 3 new [2]-shaped seeds of the same dtype...
  self.assertEqual(new_seed.shape, [3, 2])
  self.assertDTypeEqual(new_seed.dtype, dtype)
  # ...all distinct from each other and from the input seed.
  self.assertNoEqualPair([seed] + array_ops.unstack(new_seed))
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testFoldIn(self, dtype):
  """Test for `fold_in`."""
  orig_seed = constant_op.constant([1, 2], dtype='int32')
  seed = stateless.fold_in(orig_seed, constant_op.constant(3, dtype=dtype))
  new_seeds = []
  new_seeds.append(seed)
  seed = stateless.fold_in(seed, constant_op.constant(4, dtype=dtype))
  new_seeds.append(seed)
  # Each folded seed keeps the [2] shape and requested dtype...
  for s in new_seeds:
    self.assertEqual(s.shape, [2])
    self.assertDTypeEqual(s.dtype, dtype)
  # ...and all seeds (original included) are pairwise distinct.
  self.assertNoEqualPair([math_ops.cast(orig_seed, dtype)] + new_seeds)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def testErrors(self):
  """Tests that non-scalar minval/maxval raise the proper errors."""
  shape = [2, 3]
  with self.assertRaisesWithPredicateMatch(
      ValueError, 'minval must be a scalar; got a tensor of shape '):

    @def_function.function
    def f():
      stateless.stateless_random_uniform(
          shape=shape, seed=[1, 2],
          minval=array_ops.zeros(shape, 'int32'),
          maxval=100,
          dtype='int32')

    f()
  with self.assertRaisesWithPredicateMatch(
      ValueError, 'maxval must be a scalar; got a tensor of shape '):

    @def_function.function
    def f2():
      stateless.stateless_random_uniform(
          shape=shape, seed=[1, 2], minval=0,
          maxval=array_ops.ones(shape, 'int32') * 100,
          dtype='int32')

    f2()
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(self, module, **kw):
    """Patches each keyword attribute onto *module*, saving the originals.

    The saved values are restored by __exit__; presumably this class is used
    as a context manager for monkey-patching in tests.
    """
    self.module = module
    if len(kw) == 0:  # pragma: NO COVER
        raise ValueError('_Monkey was used with nothing to monkey-patch')
    # Snapshot the current values so they can be put back later.
    self.to_restore = {key: getattr(module, key) for key in kw}
    for key, value in kw.items():
        setattr(module, key, value)
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def __exit__(self, exc_type, exc_val, exc_tb):
    """Restores every patched attribute of the module to its saved value."""
    for key, value in self.to_restore.items():
        setattr(self.module, key, value)
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def __init__(self, suffix=''):
    """Creates a closed named temporary file; its path is stored in self.name."""
    import os
    import tempfile
    filehandle, self.name = tempfile.mkstemp(suffix=suffix)
    # Close the OS-level handle immediately; callers reopen by name.
    os.close(filehandle)
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def __exit__(self, exc_type, exc_val, exc_tb):
    """Deletes the temporary file at self.name on context exit."""
    import os
    os.remove(self.name)
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def _tempdir_mgr(): temp_dir = tempfile.mkdtemp() yield temp_dir shutil.rmtree(temp_dir)
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def __init__(self, **kw):
    """Attribute bag: stores every keyword argument as an instance attribute."""
    self.__dict__.update(kw)
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def _make_grpc_not_found(self):
    """Returns a gRPC error with status NOT_FOUND (via self._make_grpc_error)."""
    from grpc import StatusCode
    return self._make_grpc_error(StatusCode.NOT_FOUND)
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def _make_grpc_deadline_exceeded(self):
    """Returns a gRPC error with status DEADLINE_EXCEEDED (via self._make_grpc_error)."""
    from grpc import StatusCode
    return self._make_grpc_error(StatusCode.DEADLINE_EXCEEDED)
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def __init__(self, *pages, **kwargs):
    """Stores the given pages as an iterator plus an optional page_token.

    Presumably a fake paged-API iterator used by tests — confirm at call site.
    """
    self._pages = iter(pages)
    self.page_token = kwargs.get('page_token')
axbaretto/beam
[ 9, 2, 9, 74, 1474583398 ]
def has_pairs(profile): """Returns True if the Python that produced the argument profile was built with -DDXPAIRS."""
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def reset_profile():
    """Forgets any execution profile that has been gathered so far."""
    # NOTE(review): sys.getdxp() only exists in CPython builds compiled with
    # dynamic execution profiling enabled.
    with _profile_lock:
        sys.getdxp()  # Resets the internal profile
        global _cumulative_profile
        _cumulative_profile = sys.getdxp()  # 0s out our copy.
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def merge_profile(): """Reads sys.getdxp() and merges it into this module's cached copy.
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def snapshot_profile():
    """Returns the cumulative execution profile until this call."""
    with _profile_lock:
        # Fold the interpreter's latest counts into our cached copy, then
        # hand back a deep copy so callers can't mutate the cache.
        merge_profile()
        return copy.deepcopy(_cumulative_profile)
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def common_instructions(profile): """Returns the most common opcodes in order of descending frequency.
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def common_pairs(profile): """Returns the most common opcode pairs in order of descending frequency.
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def render_common_pairs(profile=None): """Renders the most common opcode pairs to a string in order of descending frequency.
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def seq():
    # Yields one "count: ops\n" line per opcode pair, most frequent first.
    # NOTE(review): `profile` is a free variable — presumably this is a nested
    # helper closing over an outer function's argument; confirm in context.
    for _, ops, count in common_pairs(profile):
        yield "%s: %s\n" % (count, ops)
google/google-ctf
[ 3196, 457, 3196, 1, 1524844563 ]
def __init__(self, msg):
    """Initializes the exception and prints *msg*.

    NOTE: Python 2 syntax (`print` statement); this fixture targets Python 2.
    """
    Exception.__init__(self)
    print msg
aptana/Pydev
[ 239, 85, 239, 6, 1250792405 ]
def __init__(self):
    """Initializes the exception and prints a fixed marker message.

    NOTE: Python 2 syntax (`print` statement); this fixture targets Python 2.
    """
    Exception.__init__(self)
    print "i am a fooexception"
aptana/Pydev
[ 239, 85, 239, 6, 1250792405 ]
def test_maps(self):
    """Checks nis.match agrees with nis.cat for one key of each NIS map."""
    try:
        maps = nis.maps()
    except nis.error as msg:
        # NIS is probably not active, so this test isn't useful
        self.skipTest(str(msg))
    try:
        # On some systems, this map is only accessible to the
        # super user
        maps.remove("passwd.adjunct.byname")
    except ValueError:
        pass

    done = 0
    for nismap in maps:
        mapping = nis.cat(nismap)
        for k, v in mapping.items():
            if not k:
                continue
            if nis.match(k, nismap) != v:
                self.fail("NIS match failed for key `%s' in map `%s'" %
                          (k, nismap))
            else:
                # just test the one key, otherwise this test could take a
                # very long time
                done = 1
                break
        if done:
            break
Microvellum/Fluid-Designer
[ 69, 30, 69, 37, 1461884765 ]
def setUp(self):
    """Builds a minutely random series (self.series) used by resample tests."""
    dti = DatetimeIndex(start=datetime(2005, 1, 1),
                        end=datetime(2005, 1, 10), freq='Min')
    self.series = Series(np.random.rand(len(dti)), dti)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_basic(self):
    """Checks 5-minute mean resampling with both closed sides, plus 'last'."""
    rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
                     name='index')
    s = Series(np.random.randn(14), index=rng)

    # closed='right': each bin includes its right edge.
    result = s.resample('5min', how='mean', closed='right', label='right')
    exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
    expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
                      index=exp_idx)
    assert_series_equal(result, expected)
    self.assertEqual(result.index.name, 'index')

    # closed='left': each bin includes its left edge.
    result = s.resample('5min', how='mean', closed='left', label='right')
    exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min',
                         name='index')
    expected = Series([s[:5].mean(), s[5:10].mean(), s[10:].mean()],
                      index=exp_idx)
    assert_series_equal(result, expected)

    # 'last' via resample should equal an explicit TimeGrouper groupby.
    s = self.series
    result = s.resample('5Min', how='last')
    grouper = TimeGrouper(Minute(5), closed='left', label='left')
    expect = s.groupby(grouper).agg(lambda x: x[-1])
    assert_series_equal(result, expect)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def _ohlc(group):
    """Returns [open, high, low, close] for *group* (NaN x4 if all-null)."""
    if isnull(group).all():
        return np.repeat(np.nan, 4)
    return [group[0], group.max(), group.min(), group[-1]]
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_how_callables(self):
    """Checks `how=` accepts functions, lambdas, partials and callables alike."""
    # GH 7929
    data = np.arange(5, dtype=np.int64)
    ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
    df = pd.DataFrame({"A": data, "B": data}, index=ind)

    def fn(x, a=1):
        return str(type(x))

    class fn_class:

        def __call__(self, x):
            return str(type(x))

    df_standard = df.resample("M", how=fn)
    df_lambda = df.resample("M", how=lambda x: str(type(x)))
    df_partial = df.resample("M", how=partial(fn))
    df_partial2 = df.resample("M", how=partial(fn, a=2))
    df_class = df.resample("M", how=fn_class())

    # All callable forms must produce the same result as the plain function.
    assert_frame_equal(df_standard, df_lambda)
    assert_frame_equal(df_standard, df_partial)
    assert_frame_equal(df_standard, df_partial2)
    assert_frame_equal(df_standard, df_class)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_rounding(self): # GH 8371 # odd results when rounding is needed data = """date,time,value
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_basic_from_daily(self):
    """Checks 'last' resampling of a daily series to weekly anchors and B-days."""
    # from daily
    dti = DatetimeIndex(start=datetime(2005, 1, 1),
                        end=datetime(2005, 1, 10), freq='D', name='index')

    s = Series(np.random.rand(len(dti)), dti)

    # to weekly
    result = s.resample('w-sun', how='last')

    self.assertEqual(len(result), 3)
    self.assertTrue((result.index.dayofweek == [6, 6, 6]).all())
    self.assertEqual(result.iloc[0], s['1/2/2005'])
    self.assertEqual(result.iloc[1], s['1/9/2005'])
    self.assertEqual(result.iloc[2], s.iloc[-1])

    result = s.resample('W-MON', how='last')
    self.assertEqual(len(result), 2)
    self.assertTrue((result.index.dayofweek == [0, 0]).all())
    self.assertEqual(result.iloc[0], s['1/3/2005'])
    self.assertEqual(result.iloc[1], s['1/10/2005'])

    result = s.resample('W-TUE', how='last')
    self.assertEqual(len(result), 2)
    self.assertTrue((result.index.dayofweek == [1, 1]).all())
    self.assertEqual(result.iloc[0], s['1/4/2005'])
    self.assertEqual(result.iloc[1], s['1/10/2005'])

    result = s.resample('W-WED', how='last')
    self.assertEqual(len(result), 2)
    self.assertTrue((result.index.dayofweek == [2, 2]).all())
    self.assertEqual(result.iloc[0], s['1/5/2005'])
    self.assertEqual(result.iloc[1], s['1/10/2005'])

    result = s.resample('W-THU', how='last')
    self.assertEqual(len(result), 2)
    self.assertTrue((result.index.dayofweek == [3, 3]).all())
    self.assertEqual(result.iloc[0], s['1/6/2005'])
    self.assertEqual(result.iloc[1], s['1/10/2005'])

    result = s.resample('W-FRI', how='last')
    self.assertEqual(len(result), 2)
    self.assertTrue((result.index.dayofweek == [4, 4]).all())
    self.assertEqual(result.iloc[0], s['1/7/2005'])
    self.assertEqual(result.iloc[1], s['1/10/2005'])

    # to biz day
    result = s.resample('B', how='last')
    self.assertEqual(len(result), 7)
    self.assertTrue((result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all())
    self.assertEqual(result.iloc[0], s['1/2/2005'])
    self.assertEqual(result.iloc[1], s['1/3/2005'])
    self.assertEqual(result.iloc[5], s['1/9/2005'])
    self.assertEqual(result.index.name, 'index')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_frame_basic(self):
    """Smoke-tests DataFrame resampling and per-column equivalence."""
    df = tm.makeTimeDataFrame()

    b = TimeGrouper('M')
    g = df.groupby(b)

    # check all cython functions work
    funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
    for f in funcs:
        g._cython_agg_general(f)

    # Frame-level resample must match resampling each column separately.
    result = df.resample('A')
    assert_series_equal(result['A'], df['A'].resample('A'))

    result = df.resample('M')
    assert_series_equal(result['A'], df['A'].resample('M'))

    # Smoke test: period-kind resampling should not raise.
    df.resample('M', kind='period')
    df.resample('W-WED', kind='period')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_upsample(self):
    """Checks upsampling a daily series to minutes with forward-fill."""
    # from daily
    dti = DatetimeIndex(start=datetime(2005, 1, 1),
                        end=datetime(2005, 1, 10), freq='D', name='index')

    s = Series(np.random.rand(len(dti)), dti)

    # to minutely, by padding
    result = s.resample('Min', fill_method='pad')
    # 9 days * 1440 minutes + the final endpoint = 12961 rows.
    self.assertEqual(len(result), 12961)
    self.assertEqual(result[0], s[0])
    self.assertEqual(result[-1], s[-1])

    self.assertEqual(result.index.name, 'index')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_upsample_with_limit(self):
    """Checks ffill upsampling with limit matches reindex(method='ffill')."""
    rng = date_range('1/1/2000', periods=3, freq='5t')
    ts = Series(np.random.randn(len(rng)), rng)

    result = ts.resample('t', fill_method='ffill', limit=2)
    expected = ts.reindex(result.index, method='ffill', limit=2)
    assert_series_equal(result, expected)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_ohlc_dataframe(self):
    """Checks OHLC resampling of a frame equals per-column OHLC concat."""
    df = (
        pd.DataFrame({
            'PRICE': {
                Timestamp('2011-01-06 10:59:05', tz=None): 24990,
                Timestamp('2011-01-06 12:43:33', tz=None): 25499,
                Timestamp('2011-01-06 12:54:09', tz=None): 25499},
            'VOLUME': {
                Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
                Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
                Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
    ).reindex_axis(['VOLUME', 'PRICE'], axis=1)
    res = df.resample('H', how='ohlc')
    exp = pd.concat([df['VOLUME'].resample('H', how='ohlc'),
                     df['PRICE'].resample('H', how='ohlc')],
                    axis=1, keys=['VOLUME', 'PRICE'])
    assert_frame_equal(exp, res)

    # Also works when the columns are a 2-level MultiIndex.
    df.columns = [['a', 'b'], ['c', 'd']]
    res = df.resample('H', how='ohlc')
    exp.columns = pd.MultiIndex.from_tuples([('a', 'c', 'open'),
                                             ('a', 'c', 'high'),
                                             ('a', 'c', 'low'),
                                             ('a', 'c', 'close'),
                                             ('b', 'd', 'open'),
                                             ('b', 'd', 'high'),
                                             ('b', 'd', 'low'),
                                             ('b', 'd', 'close')])
    assert_frame_equal(exp, res)

    # dupe columns fail atm
    # df.columns = ['PRICE', 'PRICE']
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_reresample(self):
    """Checks resampling a resampled series yields the expected freq."""
    dti = DatetimeIndex(start=datetime(2005, 1, 1),
                        end=datetime(2005, 1, 10), freq='D')
    s = Series(np.random.rand(len(dti)), dti)
    bs = s.resample('B', closed='right', label='right')
    result = bs.resample('8H')
    self.assertEqual(len(result), 22)
    tm.assertIsInstance(result.index.freq, offsets.DateOffset)
    self.assertEqual(result.index.freq, offsets.Hour(8))
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_ohlc_5min(self):
    """Checks 5-minute OHLC bins against a hand-computed reference."""

    def _ohlc(group):
        # Reference open/high/low/close; NaN x4 for an all-null group.
        if isnull(group).all():
            return np.repeat(np.nan, 4)
        return [group[0], group.max(), group.min(), group[-1]]

    rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s')
    ts = Series(np.random.randn(len(rng)), index=rng)

    resampled = ts.resample('5min', how='ohlc', closed='right',
                            label='right')

    # First bin contains only the first observation.
    self.assertTrue((resampled.ix['1/1/2000 00:00'] == ts[0]).all())

    exp = _ohlc(ts[1:31])
    self.assertTrue((resampled.ix['1/1/2000 00:05'] == exp).all())

    exp = _ohlc(ts['1/1/2000 5:55:01':])
    self.assertTrue((resampled.ix['1/1/2000 6:00:00'] == exp).all())
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_asfreq_non_unique(self):
    """Checks asfreq raises on a non-unique DatetimeIndex."""
    # GH #1077
    rng = date_range('1/1/2000', '2/29/2000')
    rng2 = rng.repeat(2).values  # duplicate every timestamp
    ts = Series(np.random.randn(len(rng2)), index=rng2)

    self.assertRaises(Exception, ts.asfreq, 'B')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_panel(self):
    """Checks Panel resampling along an axis equals per-item resampling."""
    rng = date_range('1/1/2000', '6/30/2000')
    n = len(rng)

    panel = Panel(np.random.randn(3, n, 5),
                  items=['one', 'two', 'three'],
                  major_axis=rng,
                  minor_axis=['a', 'b', 'c', 'd', 'e'])

    result = panel.resample('M', axis=1)

    def p_apply(panel, f):
        # Applies f to each item's frame and reassembles a Panel.
        result = {}
        for item in panel.items:
            result[item] = f(panel[item])
        return Panel(result, items=panel.items)

    expected = p_apply(panel, lambda x: x.resample('M'))
    tm.assert_panel_equal(result, expected)

    # Same check with the time axis swapped into position 2.
    panel2 = panel.swapaxes(1, 2)
    result = panel2.resample('M', axis=2)
    expected = p_apply(panel2, lambda x: x.resample('M', axis=1))
    tm.assert_panel_equal(result, expected)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_anchored_ticks(self):
    """Checks day-dividing frequencies anchor their bins at midnight."""
    # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
    # "anchor" the origin at midnight so we get regular intervals rather
    # than starting from the first timestamp which might start in the middle
    # of a desired interval
    rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
    ts = Series(np.random.randn(len(rng)), index=rng)
    ts[:2] = np.nan  # so results are the same

    freqs = ['t', '5t', '15t', '30t', '4h', '12h']
    for freq in freqs:
        result = ts[2:].resample(freq, closed='left', label='left')
        expected = ts.resample(freq, closed='left', label='left')
        assert_series_equal(result, expected)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_base(self):
    """Checks `base=` shifts the bin origin by the given number of periods."""
    rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
    ts = Series(np.random.randn(len(rng)), index=rng)

    resampled = ts.resample('5min', base=2)
    # base=2 minutes shifts the 5-minute grid back to :57/:02/:07/...
    exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
                         freq='5min')
    self.assertTrue(resampled.index.equals(exp_rng))
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_daily_anchored(self):
    """Checks daily resampling anchors bins regardless of the first timestamp."""
    rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
    ts = Series(np.random.randn(len(rng)), index=rng)
    ts[:2] = np.nan  # so results are the same

    result = ts[2:].resample('D', closed='left', label='left')
    expected = ts.resample('D', closed='left', label='left')
    assert_series_equal(result, expected)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_empty(self):
    """Checks resampling empty series/frames neither crashes nor mislabels."""
    ts = _simple_ts('1/1/2000', '2/1/2000')[:0]

    result = ts.resample('A')
    self.assertEqual(len(result), 0)
    self.assertEqual(result.index.freqstr, 'A-DEC')

    result = ts.resample('A', kind='period')
    self.assertEqual(len(result), 0)
    self.assertEqual(result.index.freqstr, 'A-DEC')

    xp = DataFrame()
    rs = xp.resample('A')
    assert_frame_equal(xp, rs)

    # Empty series were sometimes causing a segfault (for the functions
    # with Cython bounds-checking disabled) or an IndexError.  We just run
    # them to ensure they no longer do.  (GH #10228)
    for index in tm.all_timeseries_index_generator(0):
        for dtype in (np.float, np.int, np.object, 'datetime64[ns]'):
            for how in ('count', 'mean', 'min', 'ohlc', 'last', 'prod'):
                empty_series = pd.Series([], index, dtype)
                try:
                    empty_series.resample('d', how)
                except DataError:
                    # Ignore these since some combinations are invalid
                    # (ex: doing mean with dtype of np.object)
                    pass
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_monthly_resample_error(self):
    """Regression smoke test: monthly resample of an hourly series works."""
    # #1451
    dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
    ts = Series(np.random.randn(len(dates)), index=dates)

    # it works!
    result = ts.resample('M')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_anchored_monthstart(self):
    """Smoke test: resampling to start-anchored month/quarter/year freqs."""
    ts = _simple_ts('1/1/2000', '12/31/2002')

    freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']

    for freq in freqs:
        result = ts.resample(freq, how='mean')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_corner_cases(self):
    """Covers assorted resample corner cases."""
    # miscellaneous test coverage
    rng = date_range('1/1/2000', periods=12, freq='t')
    ts = Series(np.random.randn(len(rng)), index=rng)

    # closed='right' with label='left' shifts the first label before origin.
    result = ts.resample('5t', closed='right', label='left')
    ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
    self.assertTrue(result.index.equals(ex_index))

    len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
    # it works
    result = len0pts.resample('A-DEC')
    self.assertEqual(len(result), 0)

    # resample to periods
    ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
    result = ts.resample('M', kind='period')
    self.assertEqual(len(result), 1)
    self.assertEqual(result.index[0], Period('2000-04', freq='M'))
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_upsample_apply_functions(self):
    """Checks upsampling with a list of aggregations yields a DataFrame."""
    # #1596
    rng = pd.date_range('2012-06-12', periods=4, freq='h')

    ts = Series(np.random.randn(len(rng)), index=rng)

    result = ts.resample('20min', how=['mean', 'sum'])
    tm.assertIsInstance(result, DataFrame)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_median_bug_1688(self):
    """Checks minutely mean/median resampling equals asfreq for all dtypes."""
    for dtype in ['int64','int32','float64','float32']:
        df = DataFrame([1, 2],
                       index=[datetime(2012, 1, 1, 0, 0, 0),
                              datetime(2012, 1, 1, 0, 5, 0)],
                       dtype = dtype)

        # Each minute bin holds at most one value, so mean == identity.
        result = df.resample("T", how=lambda x: x.mean())
        exp = df.asfreq('T')
        tm.assert_frame_equal(result, exp)

        result = df.resample("T", how="median")
        exp = df.asfreq('T')
        tm.assert_frame_equal(result, exp)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_unequal_times(self):
        """GH #1772: annual resample when the end time-of-day precedes the start's."""
        # End hour (4) is earlier in the day than the start hour (5).
        span_start = datetime(1999, 3, 1, 5)
        span_end = datetime(2012, 7, 31, 4)
        awkward_index = date_range(span_start, span_end, freq="30min")
        frame = DataFrame({'close': 1}, index=awkward_index)
        # Exercising the call is the test — it used to raise.
        frame.resample('AS', 'sum')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_timegrouper(self):
        """GH 7227: monthly counts over unsorted dates, with and without NaT.

        NaT entries (dates2/dates3) must be dropped from the counts, and
        ``groupby(pd.Grouper(...)).count()`` must agree with
        ``set_index('A').resample('M', how='count')``.
        """
        # GH 7227
        dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3), datetime(2014, 11, 5),
                  datetime(2014, 9, 5), datetime(2014, 10, 8), datetime(2014, 7, 15)]
        # Same dates with NaT interleaved, and with NaT at both ends.
        dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
        dates3 = [pd.NaT] + dates1 + [pd.NaT]
        for dates in [dates1, dates2, dates3]:
            df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
            result = df.set_index('A').resample('M', how='count')
            exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31', '2014-09-30',
                                        '2014-10-31', '2014-11-30'],
                                       freq='M', name='A')
            expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
            assert_frame_equal(result, expected)
            # Grouper-based groupby must give the same counts.
            result = df.groupby(pd.Grouper(freq='M', key='A')).count()
            assert_frame_equal(result, expected)
            # Same again with a second value column.
            df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(len(dates))))
            result = df.set_index('A').resample('M', how='count')
            expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
                                 index=exp_idx, columns=['B', 'C'])
            assert_frame_equal(result, expected)
            result = df.groupby(pd.Grouper(freq='M', key='A')).count()
            assert_frame_equal(result, expected)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_size(self):
        """``how='size'`` must count all rows per 7-minute bin, matching an
        independent searchsorted/bincount reference computation."""
        n = 10000
        dr = date_range('2015-09-19', periods=n, freq='T')
        # Random, duplicated, unsorted timestamps drawn from the minutely range.
        ts = Series(np.random.randn(n), index=np.random.choice(dr, n))
        left = ts.resample('7T', how='size')
        # Reference: assign each timestamp to its 7-minute bin via
        # searchsorted against the bin edges, then count per bin.
        ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T')
        bins = np.searchsorted(ix.values, ts.index.values, side='right')
        val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64',copy=False)
        right = Series(val, index=ix)
        assert_series_equal(left, right)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def _simple_ts(start, end, freq='D'): rng = date_range(start, end, freq=freq) return Series(np.random.randn(len(rng)), index=rng)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_annual_upsample_D_s_f(self):
        # Annual -> daily ('D'), convention='start', fill_method='ffill'.
        self._check_annual_upsample_cases('D', 'start', 'ffill')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_annual_upsample_D_s_b(self):
        # Annual -> daily ('D'), convention='start', fill_method='bfill'.
        self._check_annual_upsample_cases('D', 'start', 'bfill')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_annual_upsample_B_s_f(self):
        # Annual -> business-daily ('B'), convention='start', fill_method='ffill'.
        self._check_annual_upsample_cases('B', 'start', 'ffill')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_annual_upsample_B_s_b(self):
        # Annual -> business-daily ('B'), convention='start', fill_method='bfill'.
        self._check_annual_upsample_cases('B', 'start', 'bfill')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_annual_upsample_M_s_f(self):
        # Annual -> monthly ('M'), convention='start', fill_method='ffill'.
        self._check_annual_upsample_cases('M', 'start', 'ffill')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_annual_upsample_M_s_b(self):
        # Annual -> monthly ('M'), convention='start', fill_method='bfill'.
        self._check_annual_upsample_cases('M', 'start', 'bfill')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
        """Check annual -> `targ` upsampling for every A-<month> anchor.

        The resampled result must equal the reference obtained by
        converting to timestamps (same convention), asfreq-ing with the
        same fill method, and converting back to periods.

        Parameters
        ----------
        targ : str
            Target frequency to upsample to (e.g. 'D', 'B', 'M').
        conv : str
            Convention: 'start' or 'end'.
        meth : str
            Fill method: 'ffill' or 'bfill'.
        end : str
            End date of the source annual range.
        """
        for month in MONTHS:
            ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
            result = ts.resample(targ, fill_method=meth, convention=conv)
            expected = result.to_timestamp(targ, how=conv)
            expected = expected.asfreq(targ, meth).to_period()
            assert_series_equal(result, expected)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_not_subperiod(self):
        """Resampling a W-WED period series to an incompatible rule must raise."""
        # These are incompatible period rules for resampling
        weekly = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
        for bad_rule in ('a-dec', 'q-mar', 'M', 'w-thu'):
            self.assertRaises(ValueError, weekly.resample, bad_rule)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_upsample_with_limit(self):
        """Upsampling with fill_method='ffill' must honour the `limit` argument."""
        annual_index = period_range('1/1/2000', periods=5, freq='A')
        annual = Series(np.random.randn(len(annual_index)), annual_index)
        upsampled = annual.resample('M', fill_method='ffill', limit=2,
                                    convention='end')
        # Reference: change frequency first, then forward-fill with the
        # same limit over the resampled index.
        reference = annual.asfreq('M').reindex(upsampled.index,
                                               method='ffill', limit=2)
        assert_series_equal(upsampled, reference)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_quarterly_upsample(self):
        """Quarterly -> D/B/M upsample agrees with the timestamp round-trip,
        for every Q-<month> anchor and both conventions."""
        targets = ['D', 'B', 'M']
        for month in MONTHS:
            ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
            for targ, conv in product(targets, ['start', 'end']):
                result = ts.resample(targ, fill_method='ffill', convention=conv)
                # Reference: to timestamps, ffill at the target frequency,
                # then back to periods.
                expected = result.to_timestamp(targ, how=conv)
                expected = expected.asfreq(targ, 'ffill').to_period()
                assert_series_equal(result, expected)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_fill_method_and_how_upsample(self):
        """GH #2073: combining how= with fill_method= when upsampling."""
        quarterly = Series(np.arange(9, dtype='int64'),
                           index=date_range('2010-01-01', periods=9, freq='Q'))
        via_fill_only = quarterly.resample('M', fill_method='ffill')
        # With one value per source bin, 'last' + ffill must equal plain ffill.
        via_how = quarterly.resample('M', how='last',
                                     fill_method='ffill').astype('int64')
        assert_series_equal(via_fill_only, via_how)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_to_timestamps(self):
        """kind='timestamp' matches an explicit to_timestamp round-trip."""
        monthly = _simple_pts('1/1/1990', '12/31/1995', freq='M')
        via_kind = monthly.resample('A-DEC', kind='timestamp')
        via_roundtrip = monthly.to_timestamp(how='end').resample('A-DEC')
        assert_series_equal(via_kind, via_roundtrip)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_fill_missing(self):
        """Annual series with gap years: resample recreates the missing periods."""
        sparse_idx = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
        sparse = Series(np.random.randn(4), index=sparse_idx)
        as_stamps = sparse.to_timestamp()
        # Without a fill method the reinstated years match the
        # timestamp-based computation...
        assert_series_equal(sparse.resample('A'),
                            as_stamps.resample('A').to_period('A'))
        # ...and so does the forward-filled variant.
        assert_series_equal(sparse.resample('A', fill_method='ffill'),
                            as_stamps.resample('A', fill_method='ffill').to_period('A'))
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_5minute(self):
        """Minutely PeriodIndex downsampled to 5min matches the timestamp path."""
        minutely_idx = period_range('1/1/2000', '1/5/2000', freq='T')
        minutely = Series(np.random.randn(len(minutely_idx)), index=minutely_idx)
        assert_series_equal(minutely.resample('5min'),
                            minutely.to_timestamp().resample('5min'))
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_empty(self):
        """Resampling an empty period-indexed Series yields an empty result."""
        empty = _simple_pts('1/1/2000', '2/1/2000')[:0]
        annual = empty.resample('A')
        self.assertEqual(len(annual), 0)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_weekly_all_na(self):
        """W-WED -> W-THU: no source point hits a target bin, so all-NaN unless filled."""
        wed_idx = date_range('1/1/2000', periods=10, freq='W-WED')
        weekly = Series(np.random.randn(len(wed_idx)), index=wed_idx)
        self.assertTrue(weekly.resample('W-THU').isnull().all())
        # Forward-filling (trailing bin dropped) must match asfreq with ffill.
        filled = weekly.resample('W-THU', fill_method='ffill')[:-1]
        assert_series_equal(filled, weekly.asfreq('W-THU', method='ffill'))
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_closed_left_corner(self):
        """GH #1465: closed='left' with a NaN first value on the left bin edge.

        s[0] is NaN, so with the NaN-skipping 'mean' the result should match
        resampling without the first point, for both label choices.
        """
        # #1465
        s = Series(np.random.randn(21),
                   index=date_range(start='1/1/2012 9:30', freq='1min', periods=21))
        s[0] = np.nan
        result = s.resample('10min', how='mean', closed='left', label='right')
        exp = s[1:].resample('10min', how='mean', closed='left', label='right')
        assert_series_equal(result, exp)
        result = s.resample('10min', how='mean', closed='left', label='left')
        exp = s[1:].resample('10min', how='mean', closed='left', label='left')
        # With label='left' the three bins are labelled 9:30, 9:40, 9:50.
        ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
        self.assertTrue(result.index.equals(ex_index))
        assert_series_equal(result, exp)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_resample_weekly_bug_1726(self):
        """GH #1726: weekly resample of an OHLC-style frame anchored on a Monday."""
        # 8/6/12 is a Monday
        daily_idx = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
        rows = [[day] * 5 for day in range(len(daily_idx))]
        frame = DataFrame(rows,
                          columns=['open', 'high', 'low', 'close', 'vol'],
                          index=daily_idx)
        # Exercising the call is the test — it used to raise.
        frame.resample('W-MON', how='first', closed='left', label='left')
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_default_right_closed_label(self):
        """End-of-interval rules ('M', 'A', 'Q', 'W') default to closed/label='right'."""
        source_freqs = ['D', 'Q', 'M', 'D']
        target_rules = ['M', 'A', 'Q', 'W']
        for src_freq, target in zip(source_freqs, target_rules):
            idx = DatetimeIndex(start='8/15/2012', periods=100, freq=src_freq)
            frame = DataFrame(np.random.randn(len(idx), 2), idx)
            implicit = frame.resample(target)
            explicit = frame.resample(target, closed='right', label='right')
            assert_frame_equal(implicit, explicit)
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]
def test_all_values_single_bin(self):
        """GH #2070: a full year of months collapsing into a single annual bin."""
        monthly_idx = period_range(start="2012-01-01", end="2012-12-31", freq="M")
        monthly = Series(np.random.randn(len(monthly_idx)), index=monthly_idx)
        annual = monthly.resample("A", how='mean')
        # The single annual bin's mean equals the mean of the whole input.
        tm.assert_almost_equal(annual[0], monthly.mean())
Vvucinic/Wander
[ 1, 1, 1, 11, 1449375044 ]