Dataset Viewer
Auto-converted to Parquet
| Column | Type | Stats |
| --- | --- | --- |
| repo | string | 4 distinct values |
| path | string | length 20–56 |
| func_name | string | length 3–41 |
| original_string | string | length 140–10k |
| language | string | 1 distinct value |
| code | string | length 140–10k |
| code_tokens | sequence | length 22–929 |
| docstring | string | length 18–8.4k |
| docstring_tokens | sequence | length 2–122 |
| sha | string | 4 distinct values |
| url | string | length 108–167 |
| partition | string | 1 distinct value |
| summary | string | length 17–285 |
| input_ids | sequence | length 502 |
| token_type_ids | sequence | length 502 |
| attention_mask | sequence | length 502 |
| labels | sequence | length 502 |
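Since the data has been auto-converted to Parquet, the split can be loaded directly with the `datasets` library. A minimal sketch, assuming a placeholder Hub id (`user/code-summarization` below stands in for this dataset's actual repository id, which the preview does not show):

```python
from datasets import load_dataset

# "user/code-summarization" is a placeholder -- substitute this
# dataset's actual Hub repository id before running.
ds = load_dataset("user/code-summarization", split="train")

row = ds[0]
print(row["repo"], row["func_name"])  # e.g. pandas-dev/pandas GroupBy.head
print(row["summary"])                 # one-line natural-language summary
print(len(row["input_ids"]))          # fixed-length encoded sequence (502)
```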
Preview rows:

**Row 1**
- repo: pandas-dev/pandas
- path: pandas/core/groupby/groupby.py
- func_name: GroupBy.head
- original_string: def head(self, n=5): """ Return first n rows of each group. Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], ...
- language: python
- code: def head(self, n=5): """ Return first n rows of each group. Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], ...
- code_tokens: [ "def", "head", "(", "self", ",", "n", "=", "5", ")", ":", "self", ".", "_reset_group_selection", "(", ")", "mask", "=", "self", ".", "_cumcount_array", "(", ")", "<", "n", "return", "self", ".", "_selected_obj", "[", "mask", "]" ]
- docstring: Return first n rows of each group. Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) >>> df.gr...
- docstring_tokens: [ "Return", "first", "n", "rows", "of", "each", "group", "." ]
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L2137-L2160
- partition: train
- summary: Return first n rows of each group.
- input_ids: [ 30522, 13366, 2132, 1006, 2969, 1010, 1050, 1027, 1019, 1007, 1024, 1000, 1000, 1000, 2709, 2034, 1050, 10281, 1997, 2169, 2177, 1012, 7687, 5662, 2000, 1036, 1036, 1012, 6611, 1006, 23375, 1060, 1024, 1060, 1012, 2132, 1006, 1050, 1007, ...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 2**
- repo: apache/spark
- path: python/pyspark/mllib/util.py
- func_name: MLUtils.appendBias
- original_string: def appendBias(data): """ Returns a new vector with `1.0` (bias) appended to the end of the input vector. """ vec = _convert_to_vector(data) if isinstance(vec, SparseVector): newIndices = np.append(vec.indices, len(vec)) newValues = np.append(vec.v...
- language: python
- code: def appendBias(data): """ Returns a new vector with `1.0` (bias) appended to the end of the input vector. """ vec = _convert_to_vector(data) if isinstance(vec, SparseVector): newIndices = np.append(vec.indices, len(vec)) newValues = np.append(vec.v...
- code_tokens: [ "def", "appendBias", "(", "data", ")", ":", "vec", "=", "_convert_to_vector", "(", "data", ")", "if", "isinstance", "(", "vec", ",", "SparseVector", ")", ":", "newIndices", "=", "np", ".", "append", "(", "vec", ".", "indices", ",", "len", "(", "vec", ...
- docstring: Returns a new vector with `1.0` (bias) appended to the end of the input vector.
- docstring_tokens: [ "Returns", "a", "new", "vector", "with", "1", ".", "0", "(", "bias", ")", "appended", "to", "the", "end", "of", "the", "input", "vector", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L177-L188
- partition: train
- summary: Returns a new vector with 1. 0 ( bias ) appended to the end of the input vector.
- input_ids: [ 30522, 13366, 10439, 10497, 11607, 2015, 1006, 2951, 1007, 1024, 1000, 1000, 1000, 5651, 1037, 2047, 9207, 2007, 1036, 1015, 1012, 1014, 1036, 1006, 13827, 1007, 10439, 21945, 2000, 1996, 2203, 1997, 1996, 7953, 9207, 1012, 1000, 1000, 1000...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 3**
- repo: apache/spark
- path: python/pyspark/shuffle.py
- func_name: ExternalGroupBy._spill
- original_string: def _spill(self): """ dump already partitioned data into disks. """ global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(path) used_memory = get_used_memory() if not self....
- language: python
- code: def _spill(self): """ dump already partitioned data into disks. """ global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(path) used_memory = get_used_memory() if not self....
- code_tokens: [ "def", "_spill", "(", "self", ")", ":", "global", "MemoryBytesSpilled", ",", "DiskBytesSpilled", "path", "=", "self", ".", "_get_spill_dir", "(", "self", ".", "spills", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", "....
- docstring: dump already partitioned data into disks.
- docstring_tokens: [ "dump", "already", "partitioned", "data", "into", "disks", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L709-L766
- partition: train
- summary: Dump already partitioned data into disks.
- input_ids: [ 30522, 13366, 1035, 14437, 1006, 2969, 1007, 1024, 1000, 1000, 1000, 15653, 2525, 13571, 2098, 2951, 2046, 23999, 1012, 1000, 1000, 1000, 3795, 3638, 3762, 4570, 13102, 10484, 2094, 1010, 9785, 3762, 4570, 13102, 10484, 2094, 4130, 1027, 29...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 4**
- repo: huggingface/pytorch-pretrained-BERT
- path: pytorch_pretrained_bert/modeling_transfo_xl.py
- func_name: TransfoXLPreTrainedModel.from_pretrained
- original_string: def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs): """ Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if n...
- language: python
- code: def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs): """ Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if n...
- code_tokens: [ "def", "from_pretrained", "(", "cls", ",", "pretrained_model_name_or_path", ",", "state_dict", "=", "None", ",", "cache_dir", "=", "None", ",", "from_tf", "=", "False", ",", "*", "inputs", ",", "*", "*", "kwargs", ")", ":", "if", "pretrained_model_name_or_path...
- docstring: Instantiate a TransfoXLPreTrainedModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name_or_path: either: - a str with the name of a pre-trained model to load selected in the list of:...
- docstring_tokens: [ "Instantiate", "a", "TransfoXLPreTrainedModel", "from", "a", "pre", "-", "trained", "model", "file", "or", "a", "pytorch", "state", "dict", ".", "Download", "and", "cache", "the", "pre", "-", "trained", "model", "file", "if", "needed", "." ]
- sha: b832d5bb8a6dfc5965015b828e577677eace601e
- url: https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_transfo_xl.py#L891-L986
- partition: train
- summary: Instantiate a TransfoXLPreTrainedModel from a pre - trained model file or a pytorch state dict.
- input_ids: [ 30522, 13366, 2013, 1035, 3653, 23654, 2098, 1006, 18856, 2015, 1010, 3653, 23654, 2098, 1035, 2944, 1035, 2171, 1035, 2030, 1035, 4130, 1010, 2110, 1035, 4487, 6593, 1027, 3904, 1010, 17053, 1035, 16101, 1027, 3904, 1010, 2013, 1035, 1056,...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 5**
- repo: apache/spark
- path: python/pyspark/rdd.py
- func_name: RDD.aggregate
- original_string: def aggregate(self, zeroValue, seqOp, combOp): """ Aggregate the elements of each partition, and then the results for all the partitions, using a given combine functions and a neutral "zero value." The functions C{op(t1, t2)} is allowed to modify C{t1} and return it as i...
- language: python
- code: def aggregate(self, zeroValue, seqOp, combOp): """ Aggregate the elements of each partition, and then the results for all the partitions, using a given combine functions and a neutral "zero value." The functions C{op(t1, t2)} is allowed to modify C{t1} and return it as i...
- code_tokens: [ "def", "aggregate", "(", "self", ",", "zeroValue", ",", "seqOp", ",", "combOp", ")", ":", "seqOp", "=", "fail_on_stopiteration", "(", "seqOp", ")", "combOp", "=", "fail_on_stopiteration", "(", "combOp", ")", "def", "func", "(", "iterator", ")", ":", "acc",...
- docstring: Aggregate the elements of each partition, and then the results for all the partitions, using a given combine functions and a neutral "zero value." The functions C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not ...
- docstring_tokens: [ "Aggregate", "the", "elements", "of", "each", "partition", "and", "then", "the", "results", "for", "all", "the", "partitions", "using", "a", "given", "combine", "functions", "and", "a", "neutral", "zero", "value", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L922-L955
- partition: train
- summary: Aggregate the elements of each partition and then the results for all the partitions using a given combine functions and a neutral zeroValue value.
- input_ids: [ 30522, 13366, 9572, 1006, 2969, 1010, 5717, 10175, 5657, 1010, 7367, 4160, 7361, 1010, 25025, 2361, 1007, 1024, 1000, 1000, 1000, 9572, 1996, 3787, 1997, 2169, 13571, 1010, 1998, 2059, 1996, 3463, 2005, 2035, 1996, 13571, 2015, 1010, 2478, ...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 6**
- repo: apache/spark
- path: python/pyspark/sql/functions.py
- func_name: array_position
- original_string: def array_position(col, value): """ Collection function: Locates the position of the first occurrence of the given value in the given array. Returns null if either of the arguments are null. .. note:: The position is not zero based, but 1 based index. Returns 0 if the given value could not be f...
- language: python
- code: def array_position(col, value): """ Collection function: Locates the position of the first occurrence of the given value in the given array. Returns null if either of the arguments are null. .. note:: The position is not zero based, but 1 based index. Returns 0 if the given value could not be f...
- code_tokens: [ "def", "array_position", "(", "col", ",", "value", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "return", "Column", "(", "sc", ".", "_jvm", ".", "functions", ".", "array_position", "(", "_to_java_column", "(", "col", ")", ",", "value", ...
- docstring: Collection function: Locates the position of the first occurrence of the given value in the given array. Returns null if either of the arguments are null. .. note:: The position is not zero based, but 1 based index. Returns 0 if the given value could not be found in the array. >>> df = spark.creat...
- docstring_tokens: [ "Collection", "function", ":", "Locates", "the", "position", "of", "the", "first", "occurrence", "of", "the", "given", "value", "in", "the", "given", "array", ".", "Returns", "null", "if", "either", "of", "the", "arguments", "are", "null", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2016-L2029
- partition: train
- summary: Returns the position of the first occurrence of the given value in the given column in the given array.
- input_ids: [ 30522, 13366, 9140, 1035, 2597, 1006, 8902, 1010, 3643, 1007, 1024, 1000, 1000, 1000, 3074, 3853, 1024, 12453, 2015, 1996, 2597, 1997, 1996, 2034, 14404, 1997, 1996, 2445, 3643, 1999, 1996, 2445, 9140, 1012, 5651, 19701, 2065, 2593, 1997, ...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 7**
- repo: apache/spark
- path: python/pyspark/ml/regression.py
- func_name: GeneralizedLinearRegressionModel.summary
- original_string: def summary(self): """ Gets summary (e.g. residuals, deviance, pValues) of model on training set. An exception is thrown if `trainingSummary is None`. """ if self.hasSummary: return GeneralizedLinearRegressionTrainingSummary( super(GeneralizedL...
- language: python
- code: def summary(self): """ Gets summary (e.g. residuals, deviance, pValues) of model on training set. An exception is thrown if `trainingSummary is None`. """ if self.hasSummary: return GeneralizedLinearRegressionTrainingSummary( super(GeneralizedL...
- code_tokens: [ "def", "summary", "(", "self", ")", ":", "if", "self", ".", "hasSummary", ":", "return", "GeneralizedLinearRegressionTrainingSummary", "(", "super", "(", "GeneralizedLinearRegressionModel", ",", "self", ")", ".", "summary", ")", "else", ":", "raise", "RuntimeError...
- docstring: Gets summary (e.g. residuals, deviance, pValues) of model on training set. An exception is thrown if `trainingSummary is None`.
- docstring_tokens: [ "Gets", "summary", "(", "e", ".", "g", ".", "residuals", "deviance", "pValues", ")", "of", "model", "on", "training", "set", ".", "An", "exception", "is", "thrown", "if", "trainingSummary", "is", "None", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/regression.py#L1679-L1690
- partition: train
- summary: Returns a GeneralizedLinearRegressionTrainingSummary object for this training set.
- input_ids: [ 30522, 13366, 12654, 1006, 2969, 1007, 1024, 1000, 1000, 1000, 4152, 12654, 1006, 1041, 1012, 1043, 1012, 21961, 2015, 1010, 14386, 6651, 1010, 26189, 2389, 15808, 1007, 1997, 2944, 2006, 2731, 2275, 1012, 2019, 6453, 2003, 6908, 2065, 1036...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 8**
- repo: apache/spark
- path: python/pyspark/sql/dataframe.py
- func_name: DataFrame._repr_html_
- original_string: def _repr_html_(self): """Returns a dataframe with html code when you enabled eager evaluation by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are using support eager evaluation with HTML. """ import cgi if not self._support_repr_html: self...
- language: python
- code: def _repr_html_(self): """Returns a dataframe with html code when you enabled eager evaluation by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are using support eager evaluation with HTML. """ import cgi if not self._support_repr_html: self...
- code_tokens: [ "def", "_repr_html_", "(", "self", ")", ":", "import", "cgi", "if", "not", "self", ".", "_support_repr_html", ":", "self", ".", "_support_repr_html", "=", "True", "if", "self", ".", "sql_ctx", ".", "_conf", ".", "isReplEagerEvalEnabled", "(", ")", ":", "ma...
- docstring: Returns a dataframe with html code when you enabled eager evaluation by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are using support eager evaluation with HTML.
- docstring_tokens: [ "Returns", "a", "dataframe", "with", "html", "code", "when", "you", "enabled", "eager", "evaluation", "by", "spark", ".", "sql", ".", "repl", ".", "eagerEval", ".", "enabled", "this", "only", "called", "by", "REPL", "you", "are", "using", "support", "eager...
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L372-L403
- partition: train
- summary: Returns a dataframe with html code when you enabled eager evaluation by Spark SQL.
- input_ids: [ 30522, 13366, 1035, 16360, 2099, 1035, 16129, 1035, 1006, 2969, 1007, 1024, 1000, 1000, 1000, 5651, 1037, 2951, 15643, 2007, 16129, 3642, 2043, 2017, 9124, 9461, 9312, 2011, 1005, 12125, 1012, 29296, 1012, 16360, 2140, 1012, 9461, 13331, 21...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 9**
- repo: apache/spark
- path: python/pyspark/rdd.py
- func_name: RDD.collect
- original_string: def collect(self): """ Return a list that contains all of the elements in this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. """ with SCCallSiteSync(self.context) as ...
- language: python
- code: def collect(self): """ Return a list that contains all of the elements in this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. """ with SCCallSiteSync(self.context) as ...
- code_tokens: [ "def", "collect", "(", "self", ")", ":", "with", "SCCallSiteSync", "(", "self", ".", "context", ")", "as", "css", ":", "sock_info", "=", "self", ".", "ctx", ".", "_jvm", ".", "PythonRDD", ".", "collectAndServe", "(", "self", ".", "_jrdd", ".", "rdd", ...
- docstring: Return a list that contains all of the elements in this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory.
- docstring_tokens: [ "Return", "a", "list", "that", "contains", "all", "of", "the", "elements", "in", "this", "RDD", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L810-L819
- partition: train
- summary: Returns a list containing all of the elements in this RDD.
- input_ids: [ 30522, 13366, 8145, 1006, 2969, 1007, 1024, 1000, 1000, 1000, 2709, 1037, 2862, 2008, 3397, 2035, 1997, 1996, 3787, 1999, 2023, 16428, 2094, 1012, 1012, 1012, 3602, 1024, 1024, 2023, 4118, 2323, 2069, 2022, 2109, 2065, 1996, 4525, 9140, 2...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 10**
- repo: apache/spark
- path: python/pyspark/sql/utils.py
- func_name: require_minimum_pandas_version
- original_string: def require_minimum_pandas_version(): """ Raise ImportError if minimum version of Pandas is not installed """ # TODO(HyukjinKwon): Relocate and deduplicate the version specification. minimum_pandas_version = "0.19.2" from distutils.version import LooseVersion try: import pandas ...
- language: python
- code: def require_minimum_pandas_version(): """ Raise ImportError if minimum version of Pandas is not installed """ # TODO(HyukjinKwon): Relocate and deduplicate the version specification. minimum_pandas_version = "0.19.2" from distutils.version import LooseVersion try: import pandas ...
- code_tokens: [ "def", "require_minimum_pandas_version", "(", ")", ":", "# TODO(HyukjinKwon): Relocate and deduplicate the version specification.", "minimum_pandas_version", "=", "\"0.19.2\"", "from", "distutils", ".", "version", "import", "LooseVersion", "try", ":", "import", "pandas", "have_...
- docstring: Raise ImportError if minimum version of Pandas is not installed
- docstring_tokens: [ "Raise", "ImportError", "if", "minimum", "version", "of", "Pandas", "is", "not", "installed" ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/utils.py#L130-L147
- partition: train
- summary: Raise ImportError if minimum version of Pandas is not installed.
- input_ids: [ 30522, 13366, 5478, 1035, 6263, 1035, 25462, 2015, 1035, 2544, 1006, 1007, 1024, 1000, 1000, 1000, 5333, 12324, 2121, 29165, 2065, 6263, 2544, 1997, 25462, 2015, 2003, 2025, 5361, 1000, 1000, 1000, 1001, 28681, 2080, 1006, 1044, 10513, 2243...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 11**
- repo: apache/spark
- path: python/pyspark/mllib/linalg/__init__.py
- func_name: DenseMatrix.asML
- original_string: def asML(self): """ Convert this matrix to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.DenseMatrix` .. versionadded:: 2.0.0 """ return newlinalg.DenseMatrix(self.numRows, self.numCo...
- language: python
- code: def asML(self): """ Convert this matrix to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.DenseMatrix` .. versionadded:: 2.0.0 """ return newlinalg.DenseMatrix(self.numRows, self.numCo...
- code_tokens: [ "def", "asML", "(", "self", ")", ":", "return", "newlinalg", ".", "DenseMatrix", "(", "self", ".", "numRows", ",", "self", ".", "numCols", ",", "self", ".", "values", ",", "self", ".", "isTransposed", ")" ]
- docstring: Convert this matrix to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.DenseMatrix` .. versionadded:: 2.0.0
- docstring_tokens: [ "Convert", "this", "matrix", "to", "the", "new", "mllib", "-", "local", "representation", ".", "This", "does", "NOT", "copy", "the", "data", ";", "it", "copies", "references", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1112-L1121
- partition: train
- summary: Convert this matrix to the new mllib - local representation.
- input_ids: [ 30522, 13366, 2004, 19968, 1006, 2969, 1007, 1024, 1000, 1000, 1000, 10463, 2023, 8185, 2000, 1996, 2047, 19875, 29521, 1011, 2334, 6630, 1012, 2023, 2515, 2025, 6100, 1996, 2951, 1025, 2009, 4809, 7604, 1012, 1024, 2709, 1024, 1024, 1052, ...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 12**
- repo: apache/spark
- path: python/pyspark/heapq3.py
- func_name: nsmallest
- original_string: def nsmallest(n, iterable, key=None): """Find the n smallest elements in a dataset. Equivalent to: sorted(iterable, key=key)[:n] """ # Short-cut for n==1 is to use min() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = min(it, default...
- language: python
- code: def nsmallest(n, iterable, key=None): """Find the n smallest elements in a dataset. Equivalent to: sorted(iterable, key=key)[:n] """ # Short-cut for n==1 is to use min() if n == 1: it = iter(iterable) sentinel = object() if key is None: result = min(it, default...
- code_tokens: [ "def", "nsmallest", "(", "n", ",", "iterable", ",", "key", "=", "None", ")", ":", "# Short-cut for n==1 is to use min()", "if", "n", "==", "1", ":", "it", "=", "iter", "(", "iterable", ")", "sentinel", "=", "object", "(", ")", "if", "key", "is", "None"...
- docstring: Find the n smallest elements in a dataset. Equivalent to: sorted(iterable, key=key)[:n]
- docstring_tokens: [ "Find", "the", "n", "smallest", "elements", "in", "a", "dataset", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L742-L803
- partition: train
- summary: Find the n smallest elements in a dataset.
- input_ids: [ 30522, 13366, 24978, 9067, 4244, 2102, 1006, 1050, 1010, 2009, 6906, 3468, 1010, 3145, 1027, 3904, 1007, 1024, 1000, 1000, 1000, 2424, 1996, 1050, 10479, 3787, 1999, 1037, 2951, 13462, 1012, 5662, 2000, 1024, 19616, 1006, 2009, 6906, 3468, ...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 13**
- repo: apache/spark
- path: python/pyspark/sql/context.py
- func_name: SQLContext.getOrCreate
- original_string: def getOrCreate(cls, sc): """ Get the existing SQLContext or create a new one with given SparkContext. :param sc: SparkContext """ if cls._instantiatedContext is None: jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc()) sparkSession = SparkSession(...
- language: python
- code: def getOrCreate(cls, sc): """ Get the existing SQLContext or create a new one with given SparkContext. :param sc: SparkContext """ if cls._instantiatedContext is None: jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc()) sparkSession = SparkSession(...
- code_tokens: [ "def", "getOrCreate", "(", "cls", ",", "sc", ")", ":", "if", "cls", ".", "_instantiatedContext", "is", "None", ":", "jsqlContext", "=", "sc", ".", "_jvm", ".", "SQLContext", ".", "getOrCreate", "(", "sc", ".", "_jsc", ".", "sc", "(", ")", ")", "spark...
- docstring: Get the existing SQLContext or create a new one with given SparkContext. :param sc: SparkContext
- docstring_tokens: [ "Get", "the", "existing", "SQLContext", "or", "create", "a", "new", "one", "with", "given", "SparkContext", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L103-L113
- partition: train
- summary: Get the existing SQLContext or create a new one with given SparkContext.
- input_ids: [ 30522, 13366, 2131, 2953, 16748, 3686, 1006, 18856, 2015, 1010, 8040, 1007, 1024, 1000, 1000, 1000, 2131, 1996, 4493, 29296, 8663, 18209, 2030, 3443, 1037, 2047, 2028, 2007, 2445, 12125, 8663, 18209, 1012, 1024, 11498, 2213, 8040, 1024, 121...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 14**
- repo: apache/spark
- path: python/pyspark/serializers.py
- func_name: ArrowStreamPandasSerializer.load_stream
- original_string: def load_stream(self, stream): """ Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series. """ batches = super(ArrowStreamPandasSerializer, self).load_stream(stream) import pyarrow as pa for batch in batches: yield [self.arrow_t...
- language: python
- code: def load_stream(self, stream): """ Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series. """ batches = super(ArrowStreamPandasSerializer, self).load_stream(stream) import pyarrow as pa for batch in batches: yield [self.arrow_t...
- code_tokens: [ "def", "load_stream", "(", "self", ",", "stream", ")", ":", "batches", "=", "super", "(", "ArrowStreamPandasSerializer", ",", "self", ")", ".", "load_stream", "(", "stream", ")", "import", "pyarrow", "as", "pa", "for", "batch", "in", "batches", ":", "yield...
- docstring: Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
- docstring_tokens: [ "Deserialize", "ArrowRecordBatches", "to", "an", "Arrow", "table", "and", "return", "as", "a", "list", "of", "pandas", ".", "Series", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L345-L352
- partition: train
- summary: Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas. Series.
- input_ids: [ 30522, 13366, 7170, 1035, 5460, 1006, 2969, 1010, 5460, 1007, 1024, 1000, 1000, 1000, 4078, 11610, 3669, 4371, 8612, 2890, 27108, 18939, 4017, 8376, 2000, 2019, 8612, 2795, 1998, 2709, 2004, 1037, 2862, 1997, 25462, 2015, 1012, 2186, 1012, ...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 15**
- repo: apache/spark
- path: python/pyspark/heapq3.py
- func_name: heappush
- original_string: def heappush(heap, item): """Push item onto heap, maintaining the heap invariant.""" heap.append(item) _siftdown(heap, 0, len(heap)-1)
- language: python
- code: def heappush(heap, item): """Push item onto heap, maintaining the heap invariant.""" heap.append(item) _siftdown(heap, 0, len(heap)-1)
- code_tokens: [ "def", "heappush", "(", "heap", ",", "item", ")", ":", "heap", ".", "append", "(", "item", ")", "_siftdown", "(", "heap", ",", "0", ",", "len", "(", "heap", ")", "-", "1", ")" ]
- docstring: Push item onto heap, maintaining the heap invariant.
- docstring_tokens: [ "Push", "item", "onto", "heap", "maintaining", "the", "heap", "invariant", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L411-L414
- partition: train
- summary: Push item onto heap maintaining the heap invariant.
- input_ids: [ 30522, 13366, 16721, 12207, 2232, 1006, 16721, 1010, 8875, 1007, 1024, 1000, 1000, 1000, 5245, 8875, 3031, 16721, 1010, 8498, 1996, 16721, 23915, 1012, 1000, 1000, 1000, 16721, 1012, 10439, 10497, 1006, 8875, 1007, 1035, 9033, 6199, 7698, 1...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 16**
- repo: apache/spark
- path: python/pyspark/streaming/context.py
- func_name: StreamingContext.queueStream
- original_string: def queueStream(self, rdds, oneAtATime=True, default=None): """ Create an input stream from a queue of RDDs or list. In each batch, it will process either one or all of the RDDs returned by the queue. .. note:: Changes to the queue after the stream is created will not be recognized. ...
- language: python
- code: def queueStream(self, rdds, oneAtATime=True, default=None): """ Create an input stream from a queue of RDDs or list. In each batch, it will process either one or all of the RDDs returned by the queue. .. note:: Changes to the queue after the stream is created will not be recognized. ...
- code_tokens: [ "def", "queueStream", "(", "self", ",", "rdds", ",", "oneAtATime", "=", "True", ",", "default", "=", "None", ")", ":", "if", "default", "and", "not", "isinstance", "(", "default", ",", "RDD", ")", ":", "default", "=", "self", ".", "_sc", ".", "parall...
- docstring: Create an input stream from a queue of RDDs or list. In each batch, it will process either one or all of the RDDs returned by the queue. .. note:: Changes to the queue after the stream is created will not be recognized. @param rdds: Queue of RDDs @param oneAtATime: pick one rdd e...
- docstring_tokens: [ "Create", "an", "input", "stream", "from", "a", "queue", "of", "RDDs", "or", "list", ".", "In", "each", "batch", "it", "will", "process", "either", "one", "or", "all", "of", "the", "RDDs", "returned", "by", "the", "queue", "." ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L286-L313
- partition: train
- summary: Create an input stream from a queue of RDDs or list of RDDs.
- input_ids: [ 30522, 13366, 24240, 21422, 1006, 2969, 1010, 16428, 5104, 1010, 2028, 6790, 7292, 1027, 2995, 1010, 12398, 1027, 3904, 1007, 1024, 1000, 1000, 1000, 3443, 2019, 7953, 5460, 2013, 1037, 24240, 1997, 16428, 5104, 2030, 2862, 1012, 1999, 2169...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
**Row 17**
- repo: apache/spark
- path: python/pyspark/serializers.py
- func_name: _hijack_namedtuple
- original_string: def _hijack_namedtuple(): """ Hack namedtuple() to make it picklable """ # hijack only one time if hasattr(collections.namedtuple, "__hijack"): return global _old_namedtuple # or it will put in closure global _old_namedtuple_kwdefaults # or it will put in closure too def _copy_func(f...
- language: python
- code: def _hijack_namedtuple(): """ Hack namedtuple() to make it picklable """ # hijack only one time if hasattr(collections.namedtuple, "__hijack"): return global _old_namedtuple # or it will put in closure global _old_namedtuple_kwdefaults # or it will put in closure too def _copy_func(f...
- code_tokens: [ "def", "_hijack_namedtuple", "(", ")", ":", "# hijack only one time", "if", "hasattr", "(", "collections", ".", "namedtuple", ",", "\"__hijack\"", ")", ":", "return", "global", "_old_namedtuple", "# or it will put in closure", "global", "_old_namedtuple_kwdefaults", "# or...
- docstring: Hack namedtuple() to make it picklable
- docstring_tokens: [ "Hack", "namedtuple", "()", "to", "make", "it", "picklable" ]
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/serializers.py#L600-L651
- partition: train
- summary: Hijacks a namedtuple function to make it picklable
- input_ids: [ 30522, 13366, 1035, 7632, 17364, 1035, 2315, 8525, 10814, 1006, 1007, 1024, 1000, 1000, 1000, 20578, 2315, 8525, 10814, 1006, 1007, 2000, 2191, 2009, 4060, 20470, 2571, 1000, 1000, 1000, 1001, 7632, 17364, 2069, 2028, 2051, 2065, 2038, 1932...
- token_type_ids: [ 0, 0, 0, ... ]
- attention_mask: [ 1, 1, 1, ... ]
- labels: [ -100, -100, -100, ... ]
End of preview.
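A note on the encoded columns: every previewed row carries fixed-length (502) `input_ids`, `token_type_ids`, and `attention_mask` sequences plus a `labels` sequence whose visible values are all -100, which is PyTorch's default `ignore_index` for cross-entropy. This is consistent with rows preprocessed for model training, where -100 marks positions that should not contribute to the loss. A minimal sketch of that convention, using hypothetical shapes and vocabulary size (not taken from this dataset):

```python
import torch
import torch.nn.functional as F

vocab_size = 30523  # hypothetical size, chosen only for illustration
logits = torch.randn(2, 502, vocab_size)

# -100 is the default ignore_index: those positions are excluded from the loss.
labels = torch.full((2, 502), -100, dtype=torch.long)
labels[:, 10:20] = 1012  # pretend a few positions carry real target ids

loss = F.cross_entropy(logits.view(-1, vocab_size), labels.view(-1),
                       ignore_index=-100)
print(loss.item())  # gradient flows only through the non-ignored positions
```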