repo stringclasses 4
values | path stringlengths 20 56 | func_name stringlengths 3 41 | original_string stringlengths 140 10k | language stringclasses 1
value | code stringlengths 140 10k | code_tokens listlengths 22 929 | docstring stringlengths 18 8.4k | docstring_tokens listlengths 2 122 | sha stringclasses 4
values | url stringlengths 108 167 | partition stringclasses 1
value | summary stringlengths 17 285 | input_ids listlengths 502 502 | token_type_ids listlengths 502 502 | attention_mask listlengths 502 502 | labels listlengths 502 502 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
apache/spark | python/pyspark/sql/column.py | Column.otherwise | def otherwise(self, value):
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param value:... | python | def otherwise(self, value):
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param value:... | [
"def",
"otherwise",
"(",
"self",
",",
"value",
")",
":",
"v",
"=",
"value",
".",
"_jc",
"if",
"isinstance",
"(",
"value",
",",
"Column",
")",
"else",
"value",
"jc",
"=",
"self",
".",
"_jc",
".",
"otherwise",
"(",
"v",
")",
"return",
"Column",
"(",
... | Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param value: a literal value, or a :class:`Column` expressio... | [
"Evaluates",
"a",
"list",
"of",
"conditions",
"and",
"returns",
"one",
"of",
"multiple",
"possible",
"result",
"expressions",
".",
"If",
":",
"func",
":",
"Column",
".",
"otherwise",
"is",
"not",
"invoked",
"None",
"is",
"returned",
"for",
"unmatched",
"cond... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L640-L660 | train | Evaluates a list of conditions and returns one of multiple possible result expressions. | [
30522,
13366,
4728,
1006,
2969,
1010,
3643,
1007,
1024,
1000,
1000,
1000,
16157,
2015,
1037,
2862,
1997,
3785,
1998,
5651,
2028,
1997,
3674,
2825,
2765,
11423,
1012,
2065,
1024,
4569,
2278,
1024,
1036,
5930,
1012,
4728,
1036,
2003,
2025,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/linalg/__init__.py | DenseVector.parse | def parse(s):
"""
Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0])
"""
start = s.find('[')
if start == -1:
raise ValueError("Array should start with '['.")
... | python | def parse(s):
"""
Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0])
"""
start = s.find('[')
if start == -1:
raise ValueError("Array should start with '['.")
... | [
"def",
"parse",
"(",
"s",
")",
":",
"start",
"=",
"s",
".",
"find",
"(",
"'['",
")",
"if",
"start",
"==",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Array should start with '['.\"",
")",
"end",
"=",
"s",
".",
"find",
"(",
"']'",
")",
"if",
"end"... | Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0]) | [
"Parse",
"string",
"representation",
"back",
"into",
"the",
"DenseVector",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L297-L316 | train | Parse string representation back into the DenseVector. | [
30522,
13366,
11968,
3366,
1006,
1055,
1007,
1024,
1000,
1000,
1000,
11968,
3366,
5164,
6630,
2067,
2046,
1996,
9742,
3726,
16761,
1012,
1028,
1028,
1028,
9742,
3726,
16761,
1012,
11968,
3366,
1006,
1005,
1031,
1014,
1012,
1014,
1010,
1015,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/rdd.py | RDD.takeOrdered | def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driv... | python | def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driv... | [
"def",
"takeOrdered",
"(",
"self",
",",
"num",
",",
"key",
"=",
"None",
")",
":",
"def",
"merge",
"(",
"a",
",",
"b",
")",
":",
"return",
"heapq",
".",
"nsmallest",
"(",
"num",
",",
"a",
"+",
"b",
",",
"key",
")",
"return",
"self",
".",
"mapPar... | Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, ... | [
"Get",
"the",
"N",
"elements",
"from",
"an",
"RDD",
"ordered",
"in",
"ascending",
"order",
"or",
"as",
"specified",
"by",
"the",
"optional",
"key",
"function",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1289-L1306 | train | Take the N elements from an RDD ordered in ascending order or as
is specified by the optional key function. | [
30522,
13366,
2202,
8551,
6850,
1006,
2969,
1010,
16371,
2213,
1010,
3145,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
2131,
1996,
1050,
3787,
2013,
2019,
16428,
2094,
3641,
1999,
22316,
2344,
2030,
2004,
9675,
2011,
1996,
11887,
3145,
3853,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/tuning.py | CrossValidator._from_java | def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed =... | python | def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed =... | [
"def",
"_from_java",
"(",
"cls",
",",
"java_stage",
")",
":",
"estimator",
",",
"epms",
",",
"evaluator",
"=",
"super",
"(",
"CrossValidator",
",",
"cls",
")",
".",
"_from_java_impl",
"(",
"java_stage",
")",
"numFolds",
"=",
"java_stage",
".",
"getNumFolds",... | Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence. | [
"Given",
"a",
"Java",
"CrossValidator",
"create",
"and",
"return",
"a",
"Python",
"wrapper",
"of",
"it",
".",
"Used",
"for",
"ML",
"persistence",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L351-L367 | train | Given a Java CrossValidator create and return a Python wrapper of it. | [
30522,
13366,
1035,
2013,
1035,
9262,
1006,
18856,
2015,
1010,
9262,
1035,
2754,
1007,
1024,
1000,
1000,
1000,
2445,
1037,
9262,
2892,
10175,
8524,
4263,
1010,
3443,
1998,
2709,
1037,
18750,
10236,
4842,
1997,
2009,
1012,
2109,
2005,
19875,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.select | def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame... | python | def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame... | [
"def",
"select",
"(",
"self",
",",
"*",
"cols",
")",
":",
"jdf",
"=",
"self",
".",
"_jdf",
".",
"select",
"(",
"self",
".",
"_jcols",
"(",
"*",
"cols",
")",
")",
"return",
"DataFrame",
"(",
"jdf",
",",
"self",
".",
"sql_ctx",
")"
] | Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collec... | [
"Projects",
"a",
"set",
"of",
"expressions",
"and",
"returns",
"a",
"new",
":",
"class",
":",
"DataFrame",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1302-L1317 | train | Projects a set of expressions and returns a new DataFrame. | [
30522,
13366,
7276,
1006,
2969,
1010,
1008,
8902,
2015,
1007,
1024,
1000,
1000,
1000,
3934,
1037,
2275,
1997,
11423,
1998,
5651,
1037,
2047,
1024,
2465,
1024,
1036,
2951,
15643,
1036,
1012,
1024,
11498,
2213,
8902,
2015,
1024,
2862,
1997,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/datetimes.py | bdate_range | def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, weekmask=None, holidays=None,
closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex, with business day as the default
frequency
Parameters
----------
st... | python | def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, weekmask=None, holidays=None,
closed=None, **kwargs):
"""
Return a fixed frequency DatetimeIndex, with business day as the default
frequency
Parameters
----------
st... | [
"def",
"bdate_range",
"(",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"periods",
"=",
"None",
",",
"freq",
"=",
"'B'",
",",
"tz",
"=",
"None",
",",
"normalize",
"=",
"True",
",",
"name",
"=",
"None",
",",
"weekmask",
"=",
"None",
",",
"h... | Return a fixed frequency DatetimeIndex, with business day as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates.
end : string or datetime-like, default None
Right bound for generating dates.
periods : integer... | [
"Return",
"a",
"fixed",
"frequency",
"DatetimeIndex",
"with",
"business",
"day",
"as",
"the",
"default",
"frequency"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimes.py#L1548-L1633 | train | Return a fixed frequency DatetimeIndex with business day as the default. | [
30522,
13366,
1038,
13701,
1035,
2846,
1006,
2707,
1027,
3904,
1010,
2203,
1027,
3904,
1010,
6993,
1027,
3904,
1010,
10424,
2063,
4160,
1027,
1005,
1038,
1005,
30524,
1027,
3904,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024,
1000,
1000,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/functions.py | date_add | def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
... | python | def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
... | [
"def",
"date_add",
"(",
"start",
",",
"days",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"date_add",
"(",
"_to_java_column",
"(",
"start",
")",
",",
"days",
")",
"... | Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))] | [
"Returns",
"the",
"date",
"that",
"is",
"days",
"days",
"after",
"start"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1058-L1067 | train | Returns the date that is days after start | [
30522,
13366,
3058,
1035,
5587,
1006,
2707,
1010,
2420,
1007,
1024,
1000,
1000,
1000,
5651,
1996,
3058,
2008,
2003,
1036,
2420,
1036,
2420,
2044,
1036,
2707,
1036,
1028,
1028,
1028,
1040,
2546,
1027,
12125,
1012,
2580,
6790,
15643,
1006,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/panel.py | Panel._homogenize_dict | def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
... | python | def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
... | [
"def",
"_homogenize_dict",
"(",
"self",
",",
"frames",
",",
"intersect",
"=",
"True",
",",
"dtype",
"=",
"None",
")",
":",
"result",
"=",
"dict",
"(",
")",
"# caller differs dict/ODict, preserved type",
"if",
"isinstance",
"(",
"frames",
",",
"OrderedDict",
")... | Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
dict of aligned results & indices | [
"Conform",
"set",
"of",
"_constructor_sliced",
"-",
"like",
"objects",
"to",
"either",
"an",
"intersection",
"of",
"indices",
"/",
"columns",
"or",
"a",
"union",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1476-L1517 | train | Convert a dict of _constructor_sliced - like objects to a dictionary of aligned results and indices. | [
30522,
13366,
1035,
24004,
6914,
4697,
1035,
4487,
6593,
1006,
2969,
1010,
11048,
1010,
29261,
1027,
2995,
1010,
26718,
18863,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
23758,
2275,
1997,
1035,
9570,
2953,
1035,
15920,
1011,
2066,
5200,
200... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.sortWithinPartitions | def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs... | python | def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs... | [
"def",
"sortWithinPartitions",
"(",
"self",
",",
"*",
"cols",
",",
"*",
"*",
"kwargs",
")",
":",
"jdf",
"=",
"self",
".",
"_jdf",
".",
"sortWithinPartitions",
"(",
"self",
".",
"_sort_cols",
"(",
"cols",
",",
"kwargs",
")",
")",
"return",
"DataFrame",
... | Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
... | [
"Returns",
"a",
"new",
":",
"class",
":",
"DataFrame",
"with",
"each",
"partition",
"sorted",
"by",
"the",
"specified",
"column",
"(",
"s",
")",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1049-L1066 | train | Returns a new DataFrame with each partition sorted by the specified column names. | [
30522,
13366,
4066,
24415,
2378,
19362,
3775,
9285,
1006,
2969,
1010,
1008,
8902,
2015,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024,
1000,
1000,
1000,
5651,
1037,
2047,
1024,
2465,
1024,
1036,
2951,
15643,
1036,
2007,
2169,
13571,
19616,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/clustering.py | BisectingKMeansModel.summary | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
... | python | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
... | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"hasSummary",
":",
"return",
"BisectingKMeansSummary",
"(",
"super",
"(",
"BisectingKMeansModel",
",",
"self",
")",
".",
"summary",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No training summa... | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. | [
"Gets",
"summary",
"(",
"e",
".",
"g",
".",
"cluster",
"assignments",
"cluster",
"sizes",
")",
"of",
"the",
"model",
"trained",
"on",
"the",
"training",
"set",
".",
"An",
"exception",
"is",
"thrown",
"if",
"no",
"summary",
"exists",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/clustering.py#L522-L531 | train | Returns the training set summary. | [
30522,
13366,
12654,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
4152,
12654,
1006,
1041,
1012,
1043,
1012,
9324,
14799,
1010,
9324,
10826,
1007,
1997,
1996,
2944,
4738,
2006,
1996,
2731,
2275,
1012,
2019,
6453,
2003,
6908,
2065,
2053,
12654,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/context.py | SparkContext.textFile | def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
The text files must be encoded as UTF-8.
If use_unic... | python | def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
The text files must be encoded as UTF-8.
If use_unic... | [
"def",
"textFile",
"(",
"self",
",",
"name",
",",
"minPartitions",
"=",
"None",
",",
"use_unicode",
"=",
"True",
")",
":",
"minPartitions",
"=",
"minPartitions",
"or",
"min",
"(",
"self",
".",
"defaultParallelism",
",",
"2",
")",
"return",
"RDD",
"(",
"s... | Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
The text files must be encoded as UTF-8.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which... | [
"Read",
"a",
"text",
"file",
"from",
"HDFS",
"a",
"local",
"file",
"system",
"(",
"available",
"on",
"all",
"nodes",
")",
"or",
"any",
"Hadoop",
"-",
"supported",
"file",
"system",
"URI",
"and",
"return",
"it",
"as",
"an",
"RDD",
"of",
"Strings",
".",
... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L582-L602 | train | Read a text file from HDFS or local file system or any Hadoop - supported file system URI and return it as an RDD of Strings. | [
30522,
13366,
3793,
8873,
2571,
1006,
2969,
1010,
2171,
1010,
8117,
19362,
3775,
9285,
1027,
3904,
1010,
2224,
1035,
27260,
1027,
2995,
1007,
1024,
1000,
1000,
1000,
3191,
1037,
3793,
5371,
2013,
10751,
10343,
1010,
1037,
2334,
5371,
2291,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index.delete | def delete(self, loc):
"""
Make new Index with passed location(-s) deleted.
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc)) | python | def delete(self, loc):
"""
Make new Index with passed location(-s) deleted.
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc)) | [
"def",
"delete",
"(",
"self",
",",
"loc",
")",
":",
"return",
"self",
".",
"_shallow_copy",
"(",
"np",
".",
"delete",
"(",
"self",
".",
"_data",
",",
"loc",
")",
")"
] | Make new Index with passed location(-s) deleted.
Returns
-------
new_index : Index | [
"Make",
"new",
"Index",
"with",
"passed",
"location",
"(",
"-",
"s",
")",
"deleted",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4915-L4923 | train | Delete the entry at the specified location. | [
30522,
13366,
3972,
12870,
1006,
2969,
1010,
8840,
2278,
1007,
1024,
1000,
1000,
1000,
2191,
2047,
5950,
2007,
2979,
3295,
1006,
1011,
1055,
1007,
17159,
1012,
5651,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
2047,
1035,
5950,
1024,
5950,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
huggingface/pytorch-pretrained-BERT | examples/lm_finetuning/simple_lm_finetuning.py | random_word | def random_word(tokens, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need it's vocab here)
:return: (list of str, list... | python | def random_word(tokens, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need it's vocab here)
:return: (list of str, list... | [
"def",
"random_word",
"(",
"tokens",
",",
"tokenizer",
")",
":",
"output_label",
"=",
"[",
"]",
"for",
"i",
",",
"token",
"in",
"enumerate",
"(",
"tokens",
")",
":",
"prob",
"=",
"random",
".",
"random",
"(",
")",
"# mask token with 15% probability",
"if",... | Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need it's vocab here)
:return: (list of str, list of int), masked tokens and related labels for L... | [
"Masking",
"some",
"random",
"tokens",
"for",
"Language",
"Model",
"task",
"with",
"probabilities",
"as",
"in",
"the",
"original",
"BERT",
"paper",
".",
":",
"param",
"tokens",
":",
"list",
"of",
"str",
"tokenized",
"sentence",
".",
":",
"param",
"tokenizer"... | b832d5bb8a6dfc5965015b828e577677eace601e | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/lm_finetuning/simple_lm_finetuning.py#L267-L303 | train | Mask some random tokens for Language Model task with probabilities as in the original BERT paper. | [
30522,
13366,
6721,
1035,
2773,
1006,
19204,
2015,
1010,
19204,
17629,
1007,
30524,
2070,
6721,
19204,
2015,
2005,
2653,
2944,
4708,
2007,
4013,
3676,
14680,
2004,
1999,
1996,
2434,
14324,
3259,
1012,
1024,
11498,
2213,
19204,
2015,
1024,
2... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/__init__.py | keyword_only | def keyword_only(func):
"""
A decorator that forces keyword arguments in the wrapped method
and saves actual input keyword arguments in `_input_kwargs`.
.. note:: Should only be used to wrap a method where first arg is `self`
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if l... | python | def keyword_only(func):
"""
A decorator that forces keyword arguments in the wrapped method
and saves actual input keyword arguments in `_input_kwargs`.
.. note:: Should only be used to wrap a method where first arg is `self`
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if l... | [
"def",
"keyword_only",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"0",
":",
"raise",
"TypeError",
"(",
"\"Method %... | A decorator that forces keyword arguments in the wrapped method
and saves actual input keyword arguments in `_input_kwargs`.
.. note:: Should only be used to wrap a method where first arg is `self` | [
"A",
"decorator",
"that",
"forces",
"keyword",
"arguments",
"in",
"the",
"wrapped",
"method",
"and",
"saves",
"actual",
"input",
"keyword",
"arguments",
"in",
"_input_kwargs",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/__init__.py#L98-L111 | train | A decorator that ensures that a method is called with keyword arguments. | [
30522,
13366,
3145,
18351,
1035,
2069,
1006,
4569,
2278,
1007,
1024,
1000,
1000,
1000,
1037,
25545,
8844,
2008,
2749,
3145,
18351,
9918,
1999,
1996,
5058,
4118,
1998,
13169,
5025,
7953,
3145,
18351,
9918,
1999,
1036,
1035,
7953,
1035,
6448,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/functions.py | array_join | def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(a... | python | def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(a... | [
"def",
"array_join",
"(",
"col",
",",
"delimiter",
",",
"null_replacement",
"=",
"None",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"if",
"null_replacement",
"is",
"None",
":",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functio... | Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined=... | [
"Concatenates",
"the",
"elements",
"of",
"column",
"using",
"the",
"delimiter",
".",
"Null",
"values",
"are",
"replaced",
"with",
"null_replacement",
"if",
"set",
"otherwise",
"they",
"are",
"ignored",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1977-L1993 | train | Concatenates the elements of column using the delimiter. Null values are replaced with
null_replacement. | [
30522,
13366,
9140,
1035,
3693,
1006,
8902,
1010,
3972,
27605,
3334,
1010,
19701,
1035,
6110,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
9530,
16280,
12556,
2015,
1996,
3787,
1997,
1036,
5930,
1036,
2478,
1996,
1036,
3972,
27605,
3334,
1036,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._get_nearest_indexer | def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
... | python | def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
... | [
"def",
"_get_nearest_indexer",
"(",
"self",
",",
"target",
",",
"limit",
",",
"tolerance",
")",
":",
"left_indexer",
"=",
"self",
".",
"get_indexer",
"(",
"target",
",",
"'pad'",
",",
"limit",
"=",
"limit",
")",
"right_indexer",
"=",
"self",
".",
"get_inde... | Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples). | [
"Get",
"the",
"indexer",
"for",
"the",
"nearest",
"index",
"labels",
";",
"requires",
"an",
"index",
"with",
"values",
"that",
"can",
"be",
"subtracted",
"from",
"each",
"other",
"(",
"e",
".",
"g",
".",
"not",
"strings",
"or",
"tuples",
")",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2807-L2826 | train | Get the indexer for the nearest index labels. | [
30522,
13366,
1035,
2131,
1035,
7205,
1035,
5950,
2121,
1006,
2969,
1010,
4539,
1010,
5787,
1010,
13986,
1007,
1024,
1000,
1000,
1000,
2131,
1996,
5950,
2121,
2005,
1996,
7205,
5950,
10873,
1025,
5942,
2019,
5950,
2007,
5300,
2008,
2064,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/common.py | _py2java | def _py2java(sc, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, list):
obj = [_py2java(sc, x)... | python | def _py2java(sc, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, list):
obj = [_py2java(sc, x)... | [
"def",
"_py2java",
"(",
"sc",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"RDD",
")",
":",
"obj",
"=",
"_to_java_object_rdd",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"DataFrame",
")",
":",
"obj",
"=",
"obj",
".",
"_jd... | Convert Python object into Java | [
"Convert",
"Python",
"object",
"into",
"Java"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/common.py#L72-L89 | train | Convert Python object into Java object. | [
30522,
13366,
1035,
1052,
2100,
2475,
3900,
3567,
1006,
8040,
1010,
27885,
3501,
1007,
1024,
1000,
1000,
1000,
10463,
18750,
4874,
2046,
9262,
1000,
1000,
1000,
2065,
2003,
7076,
26897,
1006,
27885,
3501,
1010,
16428,
2094,
1007,
1024,
2788... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/readwriter.py | DataFrameWriter.json | def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None,
lineSep=None, encoding=None):
"""Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
... | python | def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None,
lineSep=None, encoding=None):
"""Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
... | [
"def",
"json",
"(",
"self",
",",
"path",
",",
"mode",
"=",
"None",
",",
"compression",
"=",
"None",
",",
"dateFormat",
"=",
"None",
",",
"timestampFormat",
"=",
"None",
",",
"lineSep",
"=",
"None",
",",
"encoding",
"=",
"None",
")",
":",
"self",
".",... | Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data a... | [
"Saves",
"the",
"content",
"of",
"the",
":",
"class",
":",
"DataFrame",
"in",
"JSON",
"format",
"(",
"JSON",
"Lines",
"text",
"format",
"or",
"newline",
"-",
"delimited",
"JSON",
"<http",
":",
"//",
"jsonlines",
".",
"org",
"/",
">",
"_",
")",
"at",
... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L789-L826 | train | Saves the contents of the DataFrame in JSON format at the specified path. | [
30522,
13366,
1046,
3385,
1006,
2969,
1010,
4130,
1010,
5549,
1027,
3904,
1010,
13379,
1027,
3904,
1010,
3058,
14192,
4017,
1027,
3904,
1010,
2335,
15464,
14376,
2953,
18900,
1027,
3904,
1010,
3210,
13699,
1027,
3904,
1010,
17181,
1027,
390... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._add_comparison_methods | def _add_comparison_methods(cls):
"""
Add in comparison methods.
"""
cls.__eq__ = _make_comparison_op(operator.eq, cls)
cls.__ne__ = _make_comparison_op(operator.ne, cls)
cls.__lt__ = _make_comparison_op(operator.lt, cls)
cls.__gt__ = _make_comparison_op(operator.... | python | def _add_comparison_methods(cls):
"""
Add in comparison methods.
"""
cls.__eq__ = _make_comparison_op(operator.eq, cls)
cls.__ne__ = _make_comparison_op(operator.ne, cls)
cls.__lt__ = _make_comparison_op(operator.lt, cls)
cls.__gt__ = _make_comparison_op(operator.... | [
"def",
"_add_comparison_methods",
"(",
"cls",
")",
":",
"cls",
".",
"__eq__",
"=",
"_make_comparison_op",
"(",
"operator",
".",
"eq",
",",
"cls",
")",
"cls",
".",
"__ne__",
"=",
"_make_comparison_op",
"(",
"operator",
".",
"ne",
",",
"cls",
")",
"cls",
"... | Add in comparison methods. | [
"Add",
"in",
"comparison",
"methods",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5004-L5013 | train | Add in comparison methods. | [
30522,
13366,
1035,
5587,
1035,
7831,
1035,
4725,
1006,
18856,
2015,
1007,
1024,
1000,
1000,
1000,
5587,
1999,
7831,
4725,
1012,
1000,
1000,
1000,
18856,
2015,
1012,
1035,
1035,
1041,
4160,
1035,
1035,
1027,
1035,
2191,
1035,
7831,
1035,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/context.py | SparkContext._getJavaStorageLevel | def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apac... | python | def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apac... | [
"def",
"_getJavaStorageLevel",
"(",
"self",
",",
"storageLevel",
")",
":",
"if",
"not",
"isinstance",
"(",
"storageLevel",
",",
"StorageLevel",
")",
":",
"raise",
"Exception",
"(",
"\"storageLevel must be of type pyspark.StorageLevel\"",
")",
"newStorageLevel",
"=",
"... | Returns a Java StorageLevel based on a pyspark.StorageLevel. | [
"Returns",
"a",
"Java",
"StorageLevel",
"based",
"on",
"a",
"pyspark",
".",
"StorageLevel",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L949-L961 | train | Returns a Java StorageLevel based on a pyspark. StorageLevel. | [
30522,
13366,
1035,
2131,
3900,
12044,
4263,
4270,
20414,
2884,
1006,
2969,
1010,
5527,
20414,
2884,
1007,
1024,
1000,
1000,
1000,
5651,
1037,
9262,
5527,
20414,
2884,
2241,
2006,
1037,
1052,
7274,
14432,
1012,
5527,
20414,
2884,
1012,
1000... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/clustering.py | StreamingKMeansModel.update | def update(self, data, decayFactor, timeUnit):
"""Update the centroids, according to data
:param data:
RDD with new data for the model update.
:param decayFactor:
Forgetfulness of the previous centroids.
:param timeUnit:
Can be "batches" or "points". If poi... | python | def update(self, data, decayFactor, timeUnit):
"""Update the centroids, according to data
:param data:
RDD with new data for the model update.
:param decayFactor:
Forgetfulness of the previous centroids.
:param timeUnit:
Can be "batches" or "points". If poi... | [
"def",
"update",
"(",
"self",
",",
"data",
",",
"decayFactor",
",",
"timeUnit",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"RDD",
")",
":",
"raise",
"TypeError",
"(",
"\"Data should be of an RDD, got %s.\"",
"%",
"type",
"(",
"data",
")",
")",
... | Update the centroids, according to data
:param data:
RDD with new data for the model update.
:param decayFactor:
Forgetfulness of the previous centroids.
:param timeUnit:
Can be "batches" or "points". If points, then the decay factor
is raised to the powe... | [
"Update",
"the",
"centroids",
"according",
"to",
"data"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L752-L777 | train | Update the k - MEAN model with new data. | [
30522,
13366,
10651,
1006,
2969,
1010,
2951,
1010,
13121,
7011,
16761,
1010,
2051,
19496,
2102,
1007,
1024,
1000,
1000,
1000,
10651,
1996,
18120,
9821,
1010,
2429,
2000,
2951,
1024,
11498,
2213,
2951,
1024,
16428,
2094,
2007,
2047,
2951,
20... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/panel.py | Panel.to_excel | def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet.
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missin... | python | def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet.
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missin... | [
"def",
"to_excel",
"(",
"self",
",",
"path",
",",
"na_rep",
"=",
"''",
",",
"engine",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"io",
".",
"excel",
"import",
"ExcelWriter",
"if",
"isinstance",
"(",
"path",
",",
"str",
"... | Write each DataFrame in Panel to a separate excel sheet.
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write en... | [
"Write",
"each",
"DataFrame",
"in",
"Panel",
"to",
"a",
"separate",
"excel",
"sheet",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L409-L458 | train | Writes each DataFrame in Panel to an Excel file. | [
30522,
13366,
2000,
1035,
24970,
1006,
2969,
1010,
4130,
1010,
6583,
1035,
16360,
1027,
1005,
1005,
1010,
3194,
1027,
3904,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024,
1000,
1000,
1000,
4339,
2169,
2951,
15643,
1999,
5997,
2000,
1037,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/panel.py | Panel._unpickle_panel_compat | def _unpickle_panel_compat(self, state): # pragma: no cover
"""
Unpickle the panel.
"""
from pandas.io.pickle import _unpickle_array
_unpickle = _unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
... | python | def _unpickle_panel_compat(self, state): # pragma: no cover
"""
Unpickle the panel.
"""
from pandas.io.pickle import _unpickle_array
_unpickle = _unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
... | [
"def",
"_unpickle_panel_compat",
"(",
"self",
",",
"state",
")",
":",
"# pragma: no cover",
"from",
"pandas",
".",
"io",
".",
"pickle",
"import",
"_unpickle_array",
"_unpickle",
"=",
"_unpickle_array",
"vals",
",",
"items",
",",
"major",
",",
"minor",
"=",
"st... | Unpickle the panel. | [
"Unpickle",
"the",
"panel",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L615-L629 | train | Unpickle the panel. | [
30522,
13366,
1035,
4895,
24330,
19099,
1035,
5997,
1035,
4012,
4502,
2102,
1006,
2969,
1010,
2110,
1007,
1024,
1001,
10975,
8490,
2863,
1024,
2053,
3104,
1000,
1000,
1000,
4895,
24330,
19099,
1996,
5997,
1012,
1000,
1000,
1000,
2013,
25462... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/io/parquet.py | read_parquet | def read_parquet(path, engine='auto', columns=None, **kwargs):
"""
Load a parquet object from the file path, returning a DataFrame.
.. versionadded 0.21.0
Parameters
----------
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet libra... | python | def read_parquet(path, engine='auto', columns=None, **kwargs):
"""
Load a parquet object from the file path, returning a DataFrame.
.. versionadded 0.21.0
Parameters
----------
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet libra... | [
"def",
"read_parquet",
"(",
"path",
",",
"engine",
"=",
"'auto'",
",",
"columns",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"impl",
"=",
"get_engine",
"(",
"engine",
")",
"return",
"impl",
".",
"read",
"(",
"path",
",",
"columns",
"=",
"columns... | Load a parquet object from the file path, returning a DataFrame.
.. versionadded 0.21.0
Parameters
----------
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used... | [
"Load",
"a",
"parquet",
"object",
"from",
"the",
"file",
"path",
"returning",
"a",
"DataFrame",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parquet.py#L254-L282 | train | Load a DataFrame from a Parquet file. | [
30522,
13366,
3191,
1035,
11968,
12647,
1006,
4130,
1010,
3194,
1027,
1005,
8285,
1005,
1010,
7753,
1027,
3904,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024,
1000,
1000,
1000,
7170,
1037,
11968,
12647,
4874,
2013,
1996,
5371,
4130,
1010,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/tseries/holiday.py | previous_friday | def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt | python | def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt | [
"def",
"previous_friday",
"(",
"dt",
")",
":",
"if",
"dt",
".",
"weekday",
"(",
")",
"==",
"5",
":",
"return",
"dt",
"-",
"timedelta",
"(",
"1",
")",
"elif",
"dt",
".",
"weekday",
"(",
")",
"==",
"6",
":",
"return",
"dt",
"-",
"timedelta",
"(",
... | If holiday falls on Saturday or Sunday, use previous Friday instead. | [
"If",
"holiday",
"falls",
"on",
"Saturday",
"or",
"Sunday",
"use",
"previous",
"Friday",
"instead",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L42-L50 | train | Returns the previous Friday. | [
30522,
13366,
3025,
1035,
5958,
1006,
26718,
1007,
1024,
1000,
1000,
1000,
2065,
6209,
4212,
2006,
5095,
2030,
4465,
1010,
2224,
3025,
5958,
2612,
1012,
1000,
1000,
1000,
2065,
26718,
1012,
16904,
1006,
1007,
1027,
1027,
1019,
1024,
2709,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/tuning.py | ParamGridBuilder.build | def build(self):
"""
Builds and returns all combinations of parameters specified
by the param grid.
"""
keys = self._param_grid.keys()
grid_values = self._param_grid.values()
def to_key_value_pairs(keys, values):
return [(key, key.typeConverter(value)... | python | def build(self):
"""
Builds and returns all combinations of parameters specified
by the param grid.
"""
keys = self._param_grid.keys()
grid_values = self._param_grid.values()
def to_key_value_pairs(keys, values):
return [(key, key.typeConverter(value)... | [
"def",
"build",
"(",
"self",
")",
":",
"keys",
"=",
"self",
".",
"_param_grid",
".",
"keys",
"(",
")",
"grid_values",
"=",
"self",
".",
"_param_grid",
".",
"values",
"(",
")",
"def",
"to_key_value_pairs",
"(",
"keys",
",",
"values",
")",
":",
"return",... | Builds and returns all combinations of parameters specified
by the param grid. | [
"Builds",
"and",
"returns",
"all",
"combinations",
"of",
"parameters",
"specified",
"by",
"the",
"param",
"grid",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/tuning.py#L111-L122 | train | Builds and returns all combinations of parameters specified
by the param grid. | [
30522,
13366,
3857,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
16473,
1998,
5651,
2035,
14930,
1997,
11709,
9675,
2011,
1996,
11498,
2213,
8370,
1012,
1000,
1000,
1000,
6309,
1027,
2969,
1012,
1035,
11498,
2213,
1035,
8370,
1012,
6309,
1006,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/column.py | _unary_op | def _unary_op(name, doc="unary operator"):
""" Create a method for given unary operator """
def _(self):
jc = getattr(self._jc, name)()
return Column(jc)
_.__doc__ = doc
return _ | python | def _unary_op(name, doc="unary operator"):
""" Create a method for given unary operator """
def _(self):
jc = getattr(self._jc, name)()
return Column(jc)
_.__doc__ = doc
return _ | [
"def",
"_unary_op",
"(",
"name",
",",
"doc",
"=",
"\"unary operator\"",
")",
":",
"def",
"_",
"(",
"self",
")",
":",
"jc",
"=",
"getattr",
"(",
"self",
".",
"_jc",
",",
"name",
")",
"(",
")",
"return",
"Column",
"(",
"jc",
")",
"_",
".",
"__doc__... | Create a method for given unary operator | [
"Create",
"a",
"method",
"for",
"given",
"unary",
"operator"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L81-L87 | train | Create a method for given unary operator | [
30522,
13366,
1035,
14477,
2854,
1035,
6728,
1006,
2171,
1010,
9986,
1027,
1000,
14477,
2854,
30524,
19321,
2099,
1006,
2969,
1012,
1035,
29175,
1010,
2171,
1007,
1006,
1007,
2709,
5930,
1006,
29175,
1007,
1035,
1012,
1035,
1035,
9986,
1035... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/streaming/context.py | StreamingContext.addStreamingListener | def addStreamingListener(self, streamingListener):
"""
Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
receiving system events related to streaming.
"""
self._jssc.addStreamingListener(self._jvm.JavaStreamingListenerWrapper(
self._jvm.Pytho... | python | def addStreamingListener(self, streamingListener):
"""
Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
receiving system events related to streaming.
"""
self._jssc.addStreamingListener(self._jvm.JavaStreamingListenerWrapper(
self._jvm.Pytho... | [
"def",
"addStreamingListener",
"(",
"self",
",",
"streamingListener",
")",
":",
"self",
".",
"_jssc",
".",
"addStreamingListener",
"(",
"self",
".",
"_jvm",
".",
"JavaStreamingListenerWrapper",
"(",
"self",
".",
"_jvm",
".",
"PythonStreamingListenerWrapper",
"(",
... | Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
receiving system events related to streaming. | [
"Add",
"a",
"[[",
"org",
".",
"apache",
".",
"spark",
".",
"streaming",
".",
"scheduler",
".",
"StreamingListener",
"]]",
"object",
"for",
"receiving",
"system",
"events",
"related",
"to",
"streaming",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L350-L356 | train | Adds a StreamingListener object for receiving system events related to the given stream. | [
30522,
13366,
9909,
25379,
2075,
9863,
24454,
1006,
2969,
1010,
11058,
9863,
24454,
1007,
1024,
1000,
1000,
1000,
5587,
1037,
1031,
1031,
8917,
1012,
15895,
1012,
12125,
1012,
11058,
1012,
6134,
2099,
1012,
11058,
9863,
24454,
1033,
1033,
4... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index.sortlevel | def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descendi... | python | def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descendi... | [
"def",
"sortlevel",
"(",
"self",
",",
"level",
"=",
"None",
",",
"ascending",
"=",
"True",
",",
"sort_remaining",
"=",
"None",
")",
":",
"return",
"self",
".",
"sort_values",
"(",
"return_indexer",
"=",
"True",
",",
"ascending",
"=",
"ascending",
")"
] | For internal compatibility with with the Index API.
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
---... | [
"For",
"internal",
"compatibility",
"with",
"with",
"the",
"Index",
"API",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1426-L1443 | train | Sort the index by level. | [
30522,
13366,
4066,
20414,
2884,
1006,
2969,
1010,
2504,
1027,
3904,
1010,
22316,
1027,
2995,
1010,
4066,
1035,
3588,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
2005,
4722,
21778,
2007,
2007,
1996,
5950,
17928,
1012,
4066,
1996,
5950,
1012,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/streaming/dstream.py | DStream.countByWindow | def countByWindow(self, windowDuration, slideDuration):
"""
Return a new DStream in which each RDD has a single element generated
by counting the number of elements in a window over this DStream.
windowDuration and slideDuration are as defined in the window() operation.
This is ... | python | def countByWindow(self, windowDuration, slideDuration):
"""
Return a new DStream in which each RDD has a single element generated
by counting the number of elements in a window over this DStream.
windowDuration and slideDuration are as defined in the window() operation.
This is ... | [
"def",
"countByWindow",
"(",
"self",
",",
"windowDuration",
",",
"slideDuration",
")",
":",
"return",
"self",
".",
"map",
"(",
"lambda",
"x",
":",
"1",
")",
".",
"reduceByWindow",
"(",
"operator",
".",
"add",
",",
"operator",
".",
"sub",
",",
"windowDura... | Return a new DStream in which each RDD has a single element generated
by counting the number of elements in a window over this DStream.
windowDuration and slideDuration are as defined in the window() operation.
This is equivalent to window(windowDuration, slideDuration).count(),
but wil... | [
"Return",
"a",
"new",
"DStream",
"in",
"which",
"each",
"RDD",
"has",
"a",
"single",
"element",
"generated",
"by",
"counting",
"the",
"number",
"of",
"elements",
"in",
"a",
"window",
"over",
"this",
"DStream",
".",
"windowDuration",
"and",
"slideDuration",
"... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L473-L483 | train | Return a new DStream with a count of the number of elements in a given window over this DStream. | [
30522,
13366,
4175,
3762,
11101,
5004,
1006,
2969,
1010,
3332,
24979,
3370,
1010,
7358,
24979,
3370,
1007,
1024,
1000,
1000,
1000,
2709,
1037,
2047,
16233,
25379,
1999,
2029,
2169,
16428,
2094,
2038,
1037,
2309,
5783,
7013,
2011,
10320,
199... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/context.py | SparkContext.getOrCreate | def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or Spark... | python | def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or Spark... | [
"def",
"getOrCreate",
"(",
"cls",
",",
"conf",
"=",
"None",
")",
":",
"with",
"SparkContext",
".",
"_lock",
":",
"if",
"SparkContext",
".",
"_active_spark_context",
"is",
"None",
":",
"SparkContext",
"(",
"conf",
"=",
"conf",
"or",
"SparkConf",
"(",
")",
... | Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional) | [
"Get",
"or",
"instantiate",
"a",
"SparkContext",
"and",
"register",
"it",
"as",
"a",
"singleton",
"object",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L353-L362 | train | Get or instantiate a SparkContext and register it as a singleton object. | [
30522,
13366,
2131,
2953,
16748,
3686,
1006,
18856,
2015,
1010,
9530,
2546,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
2131,
2030,
7107,
13143,
1037,
12125,
8663,
18209,
1998,
4236,
2009,
2004,
1037,
28159,
4874,
1012,
1024,
11498,
2213,
953... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.intersect | def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx) | python | def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx) | [
"def",
"intersect",
"(",
"self",
",",
"other",
")",
":",
"return",
"DataFrame",
"(",
"self",
".",
"_jdf",
".",
"intersect",
"(",
"other",
".",
"_jdf",
")",
",",
"self",
".",
"sql_ctx",
")"
] | Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL. | [
"Return",
"a",
"new",
":",
"class",
":",
"DataFrame",
"containing",
"rows",
"only",
"in",
"both",
"this",
"frame",
"and",
"another",
"frame",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1488-L1494 | train | Returns a new DataFrame containing rows only in
both this and other. | [
30522,
13366,
29261,
1006,
2969,
1010,
2060,
1007,
1024,
1000,
1000,
1000,
2709,
1037,
2047,
1024,
2465,
1024,
1036,
2951,
15643,
1036,
4820,
10281,
2069,
1999,
2119,
2023,
4853,
1998,
2178,
4853,
1012,
2023,
2003,
5662,
2000,
1036,
29261,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/cloudpickle.py | dumps | def dumps(obj, protocol=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=p... | python | def dumps(obj, protocol=None):
"""Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=p... | [
"def",
"dumps",
"(",
"obj",
",",
"protocol",
"=",
"None",
")",
":",
"file",
"=",
"StringIO",
"(",
")",
"try",
":",
"cp",
"=",
"CloudPickler",
"(",
"file",
",",
"protocol",
"=",
"protocol",
")",
"cp",
".",
"dump",
"(",
"obj",
")",
"return",
"file",
... | Serialize obj as a string of bytes allocated in memory
protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed
between processes running the same Python version.
Set protocol=pickle.DEFAULT_PROTOCOL instead if you ... | [
"Serialize",
"obj",
"as",
"a",
"string",
"of",
"bytes",
"allocated",
"in",
"memory"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/cloudpickle.py#L939-L955 | train | Serialize obj as a string of bytes allocated in memory
| [
30522,
13366,
15653,
2015,
1006,
27885,
3501,
1010,
8778,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
7642,
4697,
27885,
3501,
2004,
1037,
5164,
1997,
27507,
30524,
3284,
1035,
8778,
1012,
2023,
4292,
21191,
4555,
4807,
3177,
2090,
6194,
2770... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/random.py | RandomRDDs.poissonVectorRDD | def poissonVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Poisson distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or lam... | python | def poissonVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Poisson distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or lam... | [
"def",
"poissonVectorRDD",
"(",
"sc",
",",
"mean",
",",
"numRows",
",",
"numCols",
",",
"numPartitions",
"=",
"None",
",",
"seed",
"=",
"None",
")",
":",
"return",
"callMLlibFunc",
"(",
"\"poissonVectorRDD\"",
",",
"sc",
".",
"_jsc",
",",
"float",
"(",
"... | Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Poisson distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or lambda, for the Poisson distribution.
:param numRows: Number of Vectors in the RDD.
:par... | [
"Generates",
"an",
"RDD",
"comprised",
"of",
"vectors",
"containing",
"i",
".",
"i",
".",
"d",
".",
"samples",
"drawn",
"from",
"the",
"Poisson",
"distribution",
"with",
"the",
"input",
"mean",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L317-L343 | train | Generates an RDD comprised of vectors drawn from the Poisson distribution with the input mean. | [
30522,
13366,
13433,
24077,
3726,
16761,
4103,
2094,
1006,
8040,
1010,
2812,
1010,
16371,
2213,
10524,
2015,
1010,
16371,
12458,
27896,
1010,
16371,
8737,
8445,
22753,
2015,
1027,
3904,
1010,
6534,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/dtypes/inference.py | is_list_like | def is_list_like(obj, allow_sets=True):
"""
Check if the object is list-like.
Objects that are considered list-like are for example Python
lists, tuples, sets, NumPy arrays, and Pandas Series.
Strings and datetime objects, however, are not considered list-like.
Parameters
----------
o... | python | def is_list_like(obj, allow_sets=True):
"""
Check if the object is list-like.
Objects that are considered list-like are for example Python
lists, tuples, sets, NumPy arrays, and Pandas Series.
Strings and datetime objects, however, are not considered list-like.
Parameters
----------
o... | [
"def",
"is_list_like",
"(",
"obj",
",",
"allow_sets",
"=",
"True",
")",
":",
"return",
"(",
"isinstance",
"(",
"obj",
",",
"abc",
".",
"Iterable",
")",
"and",
"# we do not count strings/unicode/bytes as list-like",
"not",
"isinstance",
"(",
"obj",
",",
"(",
"s... | Check if the object is list-like.
Objects that are considered list-like are for example Python
lists, tuples, sets, NumPy arrays, and Pandas Series.
Strings and datetime objects, however, are not considered list-like.
Parameters
----------
obj : The object to check
allow_sets : boolean, d... | [
"Check",
"if",
"the",
"object",
"is",
"list",
"-",
"like",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/inference.py#L245-L293 | train | Checks if the object is list - like. | [
30522,
13366,
2003,
1035,
2862,
1035,
2066,
1006,
27885,
3501,
1010,
30524,
4874,
2003,
2862,
1011,
2066,
1012,
5200,
2008,
2024,
2641,
2862,
1011,
2066,
2024,
2005,
2742,
18750,
7201,
1010,
10722,
21112,
1010,
4520,
1010,
16371,
8737,
2100... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/stat/KernelDensity.py | KernelDensity.estimate | def estimate(self, points):
"""Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
"estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities) | python | def estimate(self, points):
"""Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
"estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities) | [
"def",
"estimate",
"(",
"self",
",",
"points",
")",
":",
"points",
"=",
"list",
"(",
"points",
")",
"densities",
"=",
"callMLlibFunc",
"(",
"\"estimateKernelDensity\"",
",",
"self",
".",
"_sample",
",",
"self",
".",
"_bandwidth",
",",
"points",
")",
"retur... | Estimate the probability density at points | [
"Estimate",
"the",
"probability",
"density",
"at",
"points"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/stat/KernelDensity.py#L54-L59 | train | Estimate the probability density at a set of points | [
30522,
13366,
10197,
1006,
2969,
1010,
2685,
1007,
1024,
1000,
1000,
1000,
10197,
1996,
9723,
4304,
2012,
2685,
1000,
1000,
1000,
2685,
1027,
2862,
1006,
2685,
1007,
7939,
24279,
1027,
2655,
19968,
29521,
11263,
12273,
1006,
1000,
10197,
54... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/rdd.py | RDD.zipWithIndex | def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition rec... | python | def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition rec... | [
"def",
"zipWithIndex",
"(",
"self",
")",
":",
"starts",
"=",
"[",
"0",
"]",
"if",
"self",
".",
"getNumPartitions",
"(",
")",
">",
"1",
":",
"nums",
"=",
"self",
".",
"mapPartitions",
"(",
"lambda",
"it",
":",
"[",
"sum",
"(",
"1",
"for",
"i",
"in... | Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This metho... | [
"Zips",
"this",
"RDD",
"with",
"its",
"element",
"indices",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2159-L2184 | train | Returns an RDD with its element indices. | [
30522,
13366,
14101,
24415,
22254,
10288,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
14101,
2015,
2023,
16428,
2094,
2007,
2049,
5783,
29299,
1012,
1996,
13063,
2003,
2034,
2241,
2006,
1996,
13571,
5950,
1998,
2059,
1996,
13063,
1997,
5167,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/functions.py | translate | def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `match... | python | def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `match... | [
"def",
"translate",
"(",
"srcCol",
",",
"matching",
",",
"replace",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"translate",
"(",
"_to_java_column",
"(",
"srcCol",
")",... | A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate... | [
"A",
"function",
"translate",
"any",
"character",
"in",
"the",
"srcCol",
"by",
"a",
"character",
"in",
"matching",
".",
"The",
"characters",
"in",
"replace",
"is",
"corresponding",
"to",
"the",
"characters",
"in",
"matching",
".",
"The",
"translate",
"will",
... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1856-L1867 | train | A function translate any character in srcCol by a character in matching. | [
30522,
13366,
17637,
1006,
5034,
21408,
2140,
1010,
9844,
1010,
5672,
1007,
1024,
1000,
1000,
1000,
1037,
3853,
17637,
2151,
2839,
1999,
1996,
1036,
5034,
21408,
2140,
1036,
2011,
1037,
2839,
1999,
1036,
9844,
1036,
1012,
1996,
3494,
1999,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/groupby/groupby.py | GroupBy.cumcount | def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True... | python | def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True... | [
"def",
"cumcount",
"(",
"self",
",",
"ascending",
"=",
"True",
")",
":",
"with",
"_group_selection_context",
"(",
"self",
")",
":",
"index",
"=",
"self",
".",
"_selected_obj",
".",
"index",
"cumcounts",
"=",
"self",
".",
"_cumcount_array",
"(",
"ascending",
... | Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of... | [
"Number",
"each",
"item",
"in",
"each",
"group",
"from",
"0",
"to",
"the",
"length",
"of",
"that",
"group",
"-",
"1",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1846-L1897 | train | Return a Series containing the cumulative count of each item in each group. | [
30522,
13366,
13988,
3597,
16671,
1006,
2969,
1010,
22316,
1027,
2995,
1007,
1024,
1000,
1000,
1000,
2193,
2169,
8875,
1999,
2169,
2177,
2013,
1014,
2000,
1996,
3091,
1997,
2008,
2177,
1011,
1015,
1012,
7687,
2023,
2003,
5662,
2000,
1028,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/linalg/distributed.py | BlockMatrix.transpose | def transpose(self):
"""
Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. Is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3,... | python | def transpose(self):
"""
Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. Is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3,... | [
"def",
"transpose",
"(",
"self",
")",
":",
"java_transposed_matrix",
"=",
"self",
".",
"_java_matrix_wrapper",
".",
"call",
"(",
"\"transpose\"",
")",
"return",
"BlockMatrix",
"(",
"java_transposed_matrix",
",",
"self",
".",
"colsPerBlock",
",",
"self",
".",
"ro... | Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. Is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
>>>... | [
"Transpose",
"this",
"BlockMatrix",
".",
"Returns",
"a",
"new",
"BlockMatrix",
"instance",
"sharing",
"the",
"same",
"underlying",
"data",
".",
"Is",
"a",
"lazy",
"operation",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L1290-L1304 | train | Returns a new BlockMatrix with the same underlying data. | [
30522,
13366,
9099,
20688,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
9099,
20688,
2023,
3796,
18900,
17682,
1012,
5651,
1037,
2047,
3796,
18900,
17682,
6013,
6631,
1996,
2168,
10318,
2951,
1012,
2003,
1037,
13971,
3169,
1012,
1028,
1028,
10... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/clustering.py | LDA.train | def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,
topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"):
"""Train a LDA model.
:param rdd:
RDD of documents, which are tuples of document IDs and term
(word) count vectors. The term c... | python | def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0,
topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"):
"""Train a LDA model.
:param rdd:
RDD of documents, which are tuples of document IDs and term
(word) count vectors. The term c... | [
"def",
"train",
"(",
"cls",
",",
"rdd",
",",
"k",
"=",
"10",
",",
"maxIterations",
"=",
"20",
",",
"docConcentration",
"=",
"-",
"1.0",
",",
"topicConcentration",
"=",
"-",
"1.0",
",",
"seed",
"=",
"None",
",",
"checkpointInterval",
"=",
"10",
",",
"... | Train a LDA model.
:param rdd:
RDD of documents, which are tuples of document IDs and term
(word) count vectors. The term count vectors are "bags of
words" with a fixed-size vocabulary (where the vocabulary size
is the length of the vector). Document IDs must be unique
... | [
"Train",
"a",
"LDA",
"model",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L999-L1039 | train | Train a LDA model. | [
30522,
13366,
3345,
1006,
18856,
2015,
1010,
16428,
2094,
1010,
1047,
1027,
2184,
1010,
21510,
14621,
9285,
1027,
2322,
1010,
9986,
8663,
13013,
8156,
1027,
1011,
1015,
1012,
1014,
1010,
8476,
8663,
13013,
8156,
1027,
1011,
1015,
1012,
1014... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/context.py | SQLContext.createExternalTable | def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
... | python | def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
... | [
"def",
"createExternalTable",
"(",
"self",
",",
"tableName",
",",
"path",
"=",
"None",
",",
"source",
"=",
"None",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"return",
"self",
".",
"sparkSession",
".",
"catalog",
".",
"createExternal... | Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sourc... | [
"Creates",
"an",
"external",
"table",
"based",
"on",
"the",
"dataset",
"in",
"a",
"data",
"source",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L329-L344 | train | Creates an external table based on the dataset in a data source. | [
30522,
13366,
3443,
10288,
16451,
2389,
10880,
1006,
2969,
1010,
2795,
18442,
1010,
4130,
1027,
3904,
1010,
3120,
1027,
3904,
1010,
8040,
28433,
1027,
3904,
1010,
1008,
1008,
7047,
1007,
1024,
1000,
1000,
1000,
9005,
2019,
6327,
2795,
2241,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/readwriter.py | DataFrameReader.jdbc | def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
predicates=None, properties=None):
"""
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
... | python | def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
predicates=None, properties=None):
"""
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
... | [
"def",
"jdbc",
"(",
"self",
",",
"url",
",",
"table",
",",
"column",
"=",
"None",
",",
"lowerBound",
"=",
"None",
",",
"upperBound",
"=",
"None",
",",
"numPartitions",
"=",
"None",
",",
"predicates",
"=",
"None",
",",
"properties",
"=",
"None",
")",
... | Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
Partitions of the table will be retrieved in parallel if either ``column`` or
``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartiti... | [
"Construct",
"a",
":",
"class",
":",
"DataFrame",
"representing",
"the",
"database",
"table",
"named",
"table",
"accessible",
"via",
"JDBC",
"URL",
"url",
"and",
"connection",
"properties",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L521-L569 | train | This method returns a DataFrame representing the database table named table in the specified JDBC URL table and column. | [
30522,
13366,
26219,
9818,
1006,
2969,
1010,
24471,
2140,
1010,
2795,
1010,
5930,
1027,
3904,
1010,
2896,
15494,
1027,
3904,
1010,
3356,
15494,
1027,
3904,
1010,
16371,
8737,
8445,
22753,
2015,
1027,
3904,
1010,
3653,
16467,
2015,
1027,
390... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | sql/gen-sql-markdown.py | _make_pretty_examples | def _make_pretty_examples(examples):
"""
Makes the examples description pretty and returns a formatted string if `examples`
starts with the example prefix. Otherwise, returns None.
Expected input:
Examples:
> SELECT ...;
...
> SELECT ...;
...
Expe... | python | def _make_pretty_examples(examples):
"""
Makes the examples description pretty and returns a formatted string if `examples`
starts with the example prefix. Otherwise, returns None.
Expected input:
Examples:
> SELECT ...;
...
> SELECT ...;
...
Expe... | [
"def",
"_make_pretty_examples",
"(",
"examples",
")",
":",
"if",
"examples",
".",
"startswith",
"(",
"\"\\n Examples:\"",
")",
":",
"examples",
"=",
"\"\\n\"",
".",
"join",
"(",
"map",
"(",
"lambda",
"u",
":",
"u",
"[",
"6",
":",
"]",
",",
"examples",... | Makes the examples description pretty and returns a formatted string if `examples`
starts with the example prefix. Otherwise, returns None.
Expected input:
Examples:
> SELECT ...;
...
> SELECT ...;
...
Expected output:
**Examples:**
```
> SEL... | [
"Makes",
"the",
"examples",
"description",
"pretty",
"and",
"returns",
"a",
"formatted",
"string",
"if",
"examples",
"starts",
"with",
"the",
"example",
"prefix",
".",
"Otherwise",
"returns",
"None",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L89-L116 | train | Makes the examples description pretty and returns a formatted string if examples starts with the example prefix. Otherwise returns None. | [
30522,
13366,
1035,
2191,
1035,
3492,
1035,
4973,
1006,
4973,
1007,
1024,
1000,
1000,
1000,
3084,
1996,
4973,
6412,
3492,
1998,
5651,
1037,
4289,
3064,
5164,
2065,
1036,
4973,
1036,
4627,
2007,
1996,
2742,
17576,
1012,
4728,
1010,
5651,
3... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/sorting.py | safe_sort | def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
-... | python | def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
-... | [
"def",
"safe_sort",
"(",
"values",
",",
"labels",
"=",
"None",
",",
"na_sentinel",
"=",
"-",
"1",
",",
"assume_unique",
"=",
"False",
")",
":",
"if",
"not",
"is_list_like",
"(",
"values",
")",
":",
"raise",
"TypeError",
"(",
"\"Only list-like objects are all... | Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is no... | [
"Sort",
"values",
"and",
"reorder",
"corresponding",
"labels",
".",
"values",
"should",
"be",
"unique",
"if",
"labels",
"is",
"not",
"None",
".",
"Safe",
"for",
"use",
"with",
"mixed",
"types",
"(",
"int",
"str",
")",
"orders",
"ints",
"before",
"strs",
... | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sorting.py#L406-L507 | train | Sort the given values and reorder corresponding labels. | [
30522,
13366,
3647,
1035,
4066,
1006,
5300,
1010,
10873,
1027,
3904,
1010,
6583,
1035,
16074,
1027,
1011,
1015,
1010,
7868,
1035,
4310,
1027,
6270,
1007,
1024,
1000,
1000,
1000,
4066,
1036,
1036,
5300,
1036,
1036,
1998,
2128,
8551,
2121,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/feature.py | StopWordsRemover.loadDefaultStopWords | def loadDefaultStopWords(language):
"""
Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish
"""
stopWordsObj = _jvm().org.a... | python | def loadDefaultStopWords(language):
"""
Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish
"""
stopWordsObj = _jvm().org.a... | [
"def",
"loadDefaultStopWords",
"(",
"language",
")",
":",
"stopWordsObj",
"=",
"_jvm",
"(",
")",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"feature",
".",
"StopWordsRemover",
"return",
"list",
"(",
"stopWordsObj",
".",
"loadDefaultStopWords",
... | Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish | [
"Loads",
"the",
"default",
"stop",
"words",
"for",
"the",
"given",
"language",
".",
"Supported",
"languages",
":",
"danish",
"dutch",
"english",
"finnish",
"french",
"german",
"hungarian",
"italian",
"norwegian",
"portuguese",
"russian",
"spanish",
"swedish",
"tur... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L2708-L2715 | train | Loads the default stop words for the given language. | [
30522,
13366,
7170,
3207,
7011,
11314,
16033,
28400,
8551,
2015,
1006,
2653,
1007,
1024,
1000,
1000,
1000,
15665,
1996,
12398,
2644,
2616,
2005,
1996,
2445,
2653,
1012,
3569,
4155,
1024,
5695,
1010,
3803,
1010,
2394,
1010,
6983,
1010,
2413,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/context.py | SQLContext.createDataFrame | def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
... | python | def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
... | [
"def",
"createDataFrame",
"(",
"self",
",",
"data",
",",
"schema",
"=",
"None",
",",
"samplingRatio",
"=",
"None",
",",
"verifySchema",
"=",
"True",
")",
":",
"return",
"self",
".",
"sparkSession",
".",
"createDataFrame",
"(",
"data",
",",
"schema",
",",
... | Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data... | [
"Creates",
"a",
":",
"class",
":",
"DataFrame",
"from",
"an",
":",
"class",
":",
"RDD",
"a",
"list",
"or",
"a",
":",
"class",
":",
"pandas",
".",
"DataFrame",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L218-L307 | train | Creates a DataFrame from an RDD of data. | [
30522,
13366,
2580,
6790,
15643,
1006,
2969,
1010,
2951,
1010,
8040,
28433,
1027,
3904,
1010,
16227,
8609,
3695,
1027,
3904,
1010,
20410,
22842,
2863,
1027,
2995,
1007,
1024,
1000,
1000,
1000,
9005,
1037,
1024,
2465,
1024,
1036,
2951,
15643... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/context.py | SQLContext.tables | def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanTy... | python | def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanTy... | [
"def",
"tables",
"(",
"self",
",",
"dbName",
"=",
"None",
")",
":",
"if",
"dbName",
"is",
"None",
":",
"return",
"DataFrame",
"(",
"self",
".",
"_ssql_ctx",
".",
"tables",
"(",
")",
",",
"self",
")",
"else",
":",
"return",
"DataFrame",
"(",
"self",
... | Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary o... | [
"Returns",
"a",
":",
"class",
":",
"DataFrame",
"containing",
"names",
"of",
"tables",
"in",
"the",
"given",
"database",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/context.py#L375-L394 | train | Returns a DataFrame containing names of tables in the given database. | [
30522,
13366,
7251,
1006,
2969,
1010,
16962,
18442,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
5651,
1037,
1024,
2465,
1024,
1036,
2951,
15643,
1036,
4820,
3415,
1997,
7251,
1999,
1996,
2445,
7809,
1012,
2065,
1036,
30524,
13874,
1036,
8131,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/java_gateway.py | launch_gateway | def launch_gateway(conf=None, popen_kwargs=None):
"""
launch jvm gateway
:param conf: spark configuration passed to spark-submit
:param popen_kwargs: Dictionary of kwargs to pass to Popen when spawning
the py4j JVM. This is a developer feature intended for use in
customizing how pyspark ... | python | def launch_gateway(conf=None, popen_kwargs=None):
"""
launch jvm gateway
:param conf: spark configuration passed to spark-submit
:param popen_kwargs: Dictionary of kwargs to pass to Popen when spawning
the py4j JVM. This is a developer feature intended for use in
customizing how pyspark ... | [
"def",
"launch_gateway",
"(",
"conf",
"=",
"None",
",",
"popen_kwargs",
"=",
"None",
")",
":",
"if",
"\"PYSPARK_GATEWAY_PORT\"",
"in",
"os",
".",
"environ",
":",
"gateway_port",
"=",
"int",
"(",
"os",
".",
"environ",
"[",
"\"PYSPARK_GATEWAY_PORT\"",
"]",
")"... | launch jvm gateway
:param conf: spark configuration passed to spark-submit
:param popen_kwargs: Dictionary of kwargs to pass to Popen when spawning
the py4j JVM. This is a developer feature intended for use in
customizing how pyspark interacts with the py4j JVM (e.g., capturing
stdout/st... | [
"launch",
"jvm",
"gateway",
":",
"param",
"conf",
":",
"spark",
"configuration",
"passed",
"to",
"spark",
"-",
"submit",
":",
"param",
"popen_kwargs",
":",
"Dictionary",
"of",
"kwargs",
"to",
"pass",
"to",
"Popen",
"when",
"spawning",
"the",
"py4j",
"JVM",
... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/java_gateway.py#L39-L147 | train | Launch the jvm gateway using Spark s submit command. | [
30522,
30524,
2546,
1024,
12125,
9563,
2979,
2000,
12125,
1011,
12040,
1024,
11498,
2213,
4831,
2078,
1035,
6448,
2906,
5620,
1024,
9206,
1997,
6448,
2906,
5620,
2000,
3413,
2000,
4831,
2078,
2043,
27957,
1996,
1052,
2100,
2549,
3501,
1046,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/rdd.py | RDD.countApprox | def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1... | python | def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1... | [
"def",
"countApprox",
"(",
"self",
",",
"timeout",
",",
"confidence",
"=",
"0.95",
")",
":",
"drdd",
"=",
"self",
".",
"mapPartitions",
"(",
"lambda",
"it",
":",
"[",
"float",
"(",
"sum",
"(",
"1",
"for",
"i",
"in",
"it",
")",
")",
"]",
")",
"ret... | .. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000 | [
"..",
"note",
"::",
"Experimental"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2302-L2314 | train | Count the number of incomplete entries within a given timeout. | [
30522,
13366,
4175,
29098,
3217,
2595,
1006,
2969,
1010,
2051,
5833,
1010,
7023,
1027,
1014,
1012,
5345,
1007,
1024,
1000,
1000,
1000,
1012,
1012,
3602,
1024,
1024,
6388,
15796,
2544,
1997,
4175,
1006,
1007,
2008,
5651,
1037,
9280,
12958,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/tree.py | GradientBoostedTrees.trainClassifier | def trainClassifier(cls, data, categoricalFeaturesInfo,
loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3,
maxBins=32):
"""
Train a gradient-boosted trees model for classification.
:param data:
Training dataset: RDD of Labe... | python | def trainClassifier(cls, data, categoricalFeaturesInfo,
loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3,
maxBins=32):
"""
Train a gradient-boosted trees model for classification.
:param data:
Training dataset: RDD of Labe... | [
"def",
"trainClassifier",
"(",
"cls",
",",
"data",
",",
"categoricalFeaturesInfo",
",",
"loss",
"=",
"\"logLoss\"",
",",
"numIterations",
"=",
"100",
",",
"learningRate",
"=",
"0.1",
",",
"maxDepth",
"=",
"3",
",",
"maxBins",
"=",
"32",
")",
":",
"return",... | Train a gradient-boosted trees model for classification.
:param data:
Training dataset: RDD of LabeledPoint. Labels should take values
{0, 1}.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is... | [
"Train",
"a",
"gradient",
"-",
"boosted",
"trees",
"model",
"for",
"classification",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/tree.py#L511-L576 | train | Train a gradient - boosted trees model for classification. | [
30522,
13366,
3345,
26266,
18095,
1006,
18856,
2015,
1010,
2951,
1010,
4937,
27203,
7959,
4017,
14900,
2378,
14876,
1010,
3279,
1027,
1000,
8833,
10483,
2015,
1000,
1010,
16371,
23419,
28893,
1027,
2531,
1010,
4083,
11657,
1027,
1014,
1012,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/functions.py | shiftRight | def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
r... | python | def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
r... | [
"def",
"shiftRight",
"(",
"col",
",",
"numBits",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"jc",
"=",
"sc",
".",
"_jvm",
".",
"functions",
".",
"shiftRight",
"(",
"_to_java_column",
"(",
"col",
")",
",",
"numBits",
")",
"return",
... | (Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)] | [
"(",
"Signed",
")",
"shift",
"the",
"given",
"value",
"numBits",
"right",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L656-L664 | train | Shift the given value numBits right. | [
30522,
13366,
5670,
15950,
1006,
8902,
1010,
15903,
12762,
1007,
1024,
1000,
1000,
1000,
1006,
2772,
1007,
5670,
1996,
2445,
3643,
15903,
12762,
2157,
1012,
1028,
1028,
1028,
12125,
1012,
2580,
6790,
15643,
1006,
1031,
1006,
4413,
1010,
100... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._simple_new | def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
... | python | def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
... | [
"def",
"_simple_new",
"(",
"cls",
",",
"values",
",",
"name",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"hasattr",
"(",
"values",
",",
"'dtype'",
")",
":",
"if",
"(",
"values",
"is",
"None",
"or",
"no... | We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse. | [
"We",
"require",
"that",
"we",
"have",
"a",
"dtype",
"compat",
"for",
"the",
"values",
".",
"If",
"we",
"are",
"passed",
"a",
"non",
"-",
"dtype",
"compat",
"then",
"coerce",
"using",
"the",
"constructor",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L506-L539 | train | Create a new object of the same class with the passed values. | [
30522,
13366,
1035,
3722,
1035,
2047,
1006,
18856,
2015,
1010,
5300,
1010,
2171,
1027,
3904,
1010,
26718,
18863,
1027,
3904,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024,
1000,
1000,
1000,
2057,
5478,
2008,
2057,
2031,
1037,
26718,
18863,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.transform | def transform(self, func):
"""Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations.
:param func: a function that takes and returns a class:`DataFrame`.
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", ... | python | def transform(self, func):
"""Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations.
:param func: a function that takes and returns a class:`DataFrame`.
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", ... | [
"def",
"transform",
"(",
"self",
",",
"func",
")",
":",
"result",
"=",
"func",
"(",
"self",
")",
"assert",
"isinstance",
"(",
"result",
",",
"DataFrame",
")",
",",
"\"Func returned an instance of type [%s], \"",
"\"should have been DataFrame.\"",
"%",
"type",
"(",... | Returns a new class:`DataFrame`. Concise syntax for chaining custom transformations.
:param func: a function that takes and returns a class:`DataFrame`.
>>> from pyspark.sql.functions import col
>>> df = spark.createDataFrame([(1, 1.0), (2, 2.0)], ["int", "float"])
>>> def cast_all_to_... | [
"Returns",
"a",
"new",
"class",
":",
"DataFrame",
".",
"Concise",
"syntax",
"for",
"chaining",
"custom",
"transformations",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L2056-L2078 | train | Returns a new class : DataFrame. Concise syntax for chaining custom transformations. | [
30522,
13366,
10938,
1006,
2969,
1010,
4569,
2278,
1007,
1024,
1000,
1000,
1000,
5651,
1037,
2047,
2465,
1024,
1036,
2951,
15643,
1036,
1012,
9530,
18380,
20231,
2005,
4677,
2075,
7661,
21865,
1012,
1024,
11498,
2213,
4569,
2278,
1024,
1037... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/catalog.py | Catalog.createTable | def createTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not ... | python | def createTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not ... | [
"def",
"createTable",
"(",
"self",
",",
"tableName",
",",
"path",
"=",
"None",
",",
"source",
"=",
"None",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"if",
"path",
"is",
"not",
"None",
":",
"options",
"[",
"\"path\"",
"]",
"=",... | Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will b... | [
"Creates",
"a",
"table",
"based",
"on",
"the",
"dataset",
"in",
"a",
"data",
"source",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/catalog.py#L162-L188 | train | Creates a table based on the dataset in a data source. | [
30522,
13366,
3443,
10880,
1006,
2969,
1010,
2795,
18442,
1010,
4130,
1027,
3904,
1010,
3120,
1027,
3904,
1010,
8040,
28433,
1027,
3904,
1010,
1008,
1008,
7047,
1007,
1024,
1000,
1000,
1000,
9005,
1037,
2795,
2241,
2006,
1996,
2951,
13462,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._add_numeric_methods_disabled | def _add_numeric_methods_disabled(cls):
"""
Add in numeric methods to disable other than add/sub.
"""
cls.__pow__ = make_invalid_op('__pow__')
cls.__rpow__ = make_invalid_op('__rpow__')
cls.__mul__ = make_invalid_op('__mul__')
cls.__rmul__ = make_invalid_op('__rmu... | python | def _add_numeric_methods_disabled(cls):
"""
Add in numeric methods to disable other than add/sub.
"""
cls.__pow__ = make_invalid_op('__pow__')
cls.__rpow__ = make_invalid_op('__rpow__')
cls.__mul__ = make_invalid_op('__mul__')
cls.__rmul__ = make_invalid_op('__rmu... | [
"def",
"_add_numeric_methods_disabled",
"(",
"cls",
")",
":",
"cls",
".",
"__pow__",
"=",
"make_invalid_op",
"(",
"'__pow__'",
")",
"cls",
".",
"__rpow__",
"=",
"make_invalid_op",
"(",
"'__rpow__'",
")",
"cls",
".",
"__mul__",
"=",
"make_invalid_op",
"(",
"'__... | Add in numeric methods to disable other than add/sub. | [
"Add",
"in",
"numeric",
"methods",
"to",
"disable",
"other",
"than",
"add",
"/",
"sub",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5028-L5045 | train | Add in numeric methods to disable other than add or sub. | [
30522,
13366,
1035,
5587,
1035,
16371,
25531,
1035,
4725,
1035,
9776,
1006,
18856,
2015,
1007,
1024,
1000,
1000,
1000,
5587,
1999,
16371,
25531,
4725,
2000,
4487,
19150,
2060,
2084,
5587,
1013,
4942,
1012,
1000,
1000,
1000,
18856,
2015,
101... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/common.py | JavaModelWrapper.call | def call(self, name, *a):
"""Call method of java_model"""
return callJavaFunc(self._sc, getattr(self._java_model, name), *a) | python | def call(self, name, *a):
"""Call method of java_model"""
return callJavaFunc(self._sc, getattr(self._java_model, name), *a) | [
"def",
"call",
"(",
"self",
",",
"name",
",",
"*",
"a",
")",
":",
"return",
"callJavaFunc",
"(",
"self",
".",
"_sc",
",",
"getattr",
"(",
"self",
".",
"_java_model",
",",
"name",
")",
",",
"*",
"a",
")"
] | Call method of java_model | [
"Call",
"method",
"of",
"java_model"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/common.py#L144-L146 | train | Call method of java_model | [
30522,
13366,
2655,
1006,
2969,
1010,
2171,
1010,
1008,
1037,
1007,
1024,
1000,
1000,
1000,
2655,
4118,
1997,
9262,
1035,
2944,
1000,
1000,
1000,
2709,
2655,
3900,
3567,
11263,
12273,
1006,
2969,
1012,
1035,
8040,
1010,
2131,
19321,
2099,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
huggingface/pytorch-pretrained-BERT | pytorch_pretrained_bert/tokenization.py | BertTokenizer.from_pretrained | def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHI... | python | def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHI... | [
"def",
"from_pretrained",
"(",
"cls",
",",
"pretrained_model_name_or_path",
",",
"cache_dir",
"=",
"None",
",",
"*",
"inputs",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pretrained_model_name_or_path",
"in",
"PRETRAINED_VOCAB_ARCHIVE_MAP",
":",
"vocab_file",
"=",
"... | Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed. | [
"Instantiate",
"a",
"PreTrainedBertModel",
"from",
"a",
"pre",
"-",
"trained",
"model",
"file",
".",
"Download",
"and",
"cache",
"the",
"pre",
"-",
"trained",
"model",
"file",
"if",
"needed",
"."
] | b832d5bb8a6dfc5965015b828e577677eace601e | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L153-L198 | train | Instantiate a PreTrainedBertModel from a pre - trained model file. | [
30522,
13366,
2013,
1035,
3653,
23654,
2098,
1006,
18856,
2015,
1010,
3653,
23654,
2098,
1035,
2944,
1035,
2171,
1035,
2030,
1035,
4130,
1010,
17053,
1035,
16101,
1027,
3904,
1010,
1008,
20407,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/common.py | callJavaFunc | def callJavaFunc(sc, func, *args):
""" Call Java Function """
args = [_py2java(sc, a) for a in args]
return _java2py(sc, func(*args)) | python | def callJavaFunc(sc, func, *args):
""" Call Java Function """
args = [_py2java(sc, a) for a in args]
return _java2py(sc, func(*args)) | [
"def",
"callJavaFunc",
"(",
"sc",
",",
"func",
",",
"*",
"args",
")",
":",
"args",
"=",
"[",
"_py2java",
"(",
"sc",
",",
"a",
")",
"for",
"a",
"in",
"args",
"]",
"return",
"_java2py",
"(",
"sc",
",",
"func",
"(",
"*",
"args",
")",
")"
] | Call Java Function | [
"Call",
"Java",
"Function"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/common.py#L120-L123 | train | Call Java Function | [
30522,
13366,
2655,
3900,
3567,
11263,
12273,
1006,
8040,
1010,
4569,
2278,
1010,
1008,
12098,
5620,
1007,
1024,
1000,
1000,
1000,
2655,
9262,
3853,
1000,
1000,
1000,
12098,
5620,
1027,
1031,
1035,
1052,
2100,
2475,
3900,
3567,
1006,
8040,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/groupby/groupby.py | _GroupBy._get_indices | def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isi... | python | def _get_indices(self, names):
"""
Safe get multiple indices, translate keys for
datelike to underlying repr.
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isi... | [
"def",
"_get_indices",
"(",
"self",
",",
"names",
")",
":",
"def",
"get_converter",
"(",
"s",
")",
":",
"# possibly convert to the actual key types",
"# in the indices, could be a Timestamp or a np.datetime64",
"if",
"isinstance",
"(",
"s",
",",
"(",
"Timestamp",
",",
... | Safe get multiple indices, translate keys for
datelike to underlying repr. | [
"Safe",
"get",
"multiple",
"indices",
"translate",
"keys",
"for",
"datelike",
"to",
"underlying",
"repr",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L409-L457 | train | Internal method to get multiple indices for a given set of names. | [
30522,
13366,
1035,
2131,
1035,
29299,
1006,
2969,
1010,
3415,
1007,
1024,
1000,
1000,
1000,
3647,
2131,
3674,
29299,
1010,
17637,
6309,
2005,
3058,
10359,
2000,
10318,
16360,
2099,
1012,
1000,
1000,
1000,
13366,
2131,
1035,
10463,
2121,
10... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/window.py | _Window._center_window | def _center_window(self, result, window):
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = _offset(window, True)
if o... | python | def _center_window(self, result, window):
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = _offset(window, True)
if o... | [
"def",
"_center_window",
"(",
"self",
",",
"result",
",",
"window",
")",
":",
"if",
"self",
".",
"axis",
">",
"result",
".",
"ndim",
"-",
"1",
":",
"raise",
"ValueError",
"(",
"\"Requested axis is larger then no. of argument \"",
"\"dimensions\"",
")",
"offset",... | Center the result in the window. | [
"Center",
"the",
"result",
"in",
"the",
"window",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L290-L306 | train | Center the result in the window. | [
30522,
13366,
1035,
2415,
1035,
3332,
1006,
2969,
1010,
2765,
1010,
3332,
1007,
1024,
1000,
1000,
1000,
2415,
1996,
2765,
1999,
1996,
3332,
1012,
1000,
1000,
1000,
2065,
2969,
1012,
8123,
1028,
2765,
1012,
1050,
22172,
1011,
1015,
1024,
5... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/context.py | SparkContext.pickleFile | def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFi... | python | def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFi... | [
"def",
"pickleFile",
"(",
"self",
",",
"name",
",",
"minPartitions",
"=",
"None",
")",
":",
"minPartitions",
"=",
"minPartitions",
"or",
"self",
".",
"defaultMinPartitions",
"return",
"RDD",
"(",
"self",
".",
"_jsc",
".",
"objectFile",
"(",
"name",
",",
"m... | Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9... | [
"Load",
"an",
"RDD",
"previously",
"saved",
"using",
"L",
"{",
"RDD",
".",
"saveAsPickleFile",
"}",
"method",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L568-L579 | train | Load an RDD previously saved using pickleFile method. | [
30522,
13366,
4060,
2571,
8873,
2571,
1006,
2969,
1010,
2171,
1010,
8117,
19362,
3775,
9285,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
7170,
2019,
16428,
2094,
3130,
5552,
2478,
1048,
1063,
16428,
2094,
1012,
3828,
3022,
24330,
19099,
8873,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/regression.py | LinearRegressionModel.summary | def summary(self):
"""
Gets summary (e.g. residuals, mse, r-squared ) of model on
training set. An exception is thrown if
`trainingSummary is None`.
"""
if self.hasSummary:
return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary)
... | python | def summary(self):
"""
Gets summary (e.g. residuals, mse, r-squared ) of model on
training set. An exception is thrown if
`trainingSummary is None`.
"""
if self.hasSummary:
return LinearRegressionTrainingSummary(super(LinearRegressionModel, self).summary)
... | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"hasSummary",
":",
"return",
"LinearRegressionTrainingSummary",
"(",
"super",
"(",
"LinearRegressionModel",
",",
"self",
")",
".",
"summary",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No trai... | Gets summary (e.g. residuals, mse, r-squared ) of model on
training set. An exception is thrown if
`trainingSummary is None`. | [
"Gets",
"summary",
"(",
"e",
".",
"g",
".",
"residuals",
"mse",
"r",
"-",
"squared",
")",
"of",
"model",
"on",
"training",
"set",
".",
"An",
"exception",
"is",
"thrown",
"if",
"trainingSummary",
"is",
"None",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/regression.py#L198-L208 | train | Returns the summary of the LinearRegressionModel. | [
30522,
13366,
12654,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
4152,
12654,
1006,
1041,
1012,
1043,
1012,
21961,
2015,
1010,
5796,
2063,
1010,
1054,
1011,
19942,
1007,
1997,
2944,
2006,
2731,
2275,
1012,
2019,
6453,
2003,
6908,
2065,
1036,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/image.py | _ImageSchema.ocvTypes | def ocvTypes(self):
"""
Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0
"""
if self._ocvTypes is None:
ctx = SparkContext._active_spark_context
self._ocvTypes... | python | def ocvTypes(self):
"""
Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0
"""
if self._ocvTypes is None:
ctx = SparkContext._active_spark_context
self._ocvTypes... | [
"def",
"ocvTypes",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ocvTypes",
"is",
"None",
":",
"ctx",
"=",
"SparkContext",
".",
"_active_spark_context",
"self",
".",
"_ocvTypes",
"=",
"dict",
"(",
"ctx",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spar... | Returns the OpenCV type mapping supported.
:return: a dictionary containing the OpenCV type mapping supported.
.. versionadded:: 2.3.0 | [
"Returns",
"the",
"OpenCV",
"type",
"mapping",
"supported",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/image.py#L72-L84 | train | Returns the OpenCV type mapping supported. | [
30522,
13366,
1051,
2278,
2615,
13874,
2015,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
5651,
1996,
2330,
2278,
2615,
2828,
12375,
3569,
1012,
1024,
2709,
1024,
1037,
9206,
4820,
1996,
2330,
2278,
2615,
30524,
2065,
2969,
1012,
1035,
1051,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/streaming/dstream.py | DStream.updateStateByKey | def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None):
"""
Return a new "state" DStream where the state for each key is updated by applying
the given function on the previous state of the key and the new values of the key.
@param updateFunc: State update function. ... | python | def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None):
"""
Return a new "state" DStream where the state for each key is updated by applying
the given function on the previous state of the key and the new values of the key.
@param updateFunc: State update function. ... | [
"def",
"updateStateByKey",
"(",
"self",
",",
"updateFunc",
",",
"numPartitions",
"=",
"None",
",",
"initialRDD",
"=",
"None",
")",
":",
"if",
"numPartitions",
"is",
"None",
":",
"numPartitions",
"=",
"self",
".",
"_sc",
".",
"defaultParallelism",
"if",
"init... | Return a new "state" DStream where the state for each key is updated by applying
the given function on the previous state of the key and the new values of the key.
@param updateFunc: State update function. If this function returns None, then
corresponding state key-value pair... | [
"Return",
"a",
"new",
"state",
"DStream",
"where",
"the",
"state",
"for",
"each",
"key",
"is",
"updated",
"by",
"applying",
"the",
"given",
"function",
"on",
"the",
"previous",
"state",
"of",
"the",
"key",
"and",
"the",
"new",
"values",
"of",
"the",
"key... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L576-L608 | train | Returns a new DStream where the state for each key is updated by applying updateFunc on the previous state of the key and the new values of the key. | [
30522,
13366,
14409,
12259,
3762,
14839,
1006,
2969,
1010,
10651,
11263,
12273,
1010,
16371,
8737,
8445,
22753,
2015,
1027,
3904,
1010,
3988,
4103,
2094,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
2709,
1037,
2047,
1000,
2110,
1000,
16233,
2... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/groupby/ops.py | BinGrouper.groups | def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {key: value for key, value in zip(self.binlabels, self.bins)
if key is not NaT}
return result | python | def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {key: value for key, value in zip(self.binlabels, self.bins)
if key is not NaT}
return result | [
"def",
"groups",
"(",
"self",
")",
":",
"# this is mainly for compat",
"# GH 3881",
"result",
"=",
"{",
"key",
":",
"value",
"for",
"key",
",",
"value",
"in",
"zip",
"(",
"self",
".",
"binlabels",
",",
"self",
".",
"bins",
")",
"if",
"key",
"is",
"not"... | dict {group name -> group labels} | [
"dict",
"{",
"group",
"name",
"-",
">",
"group",
"labels",
"}"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L698-L705 | train | dict of group name - > group labels | [
30522,
13366,
2967,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
4487,
6593,
1063,
2177,
2171,
1011,
1028,
2177,
10873,
1065,
1000,
1000,
1000,
1001,
2023,
2003,
3701,
2005,
4012,
4502,
2102,
1001,
1043,
2232,
4229,
2620,
2487,
2765,
1027,
1... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/groupby/categorical.py | recode_for_groupby | def recode_for_groupby(c, sort, observed):
"""
Code the categories to ensure we can groupby for categoricals.
If observed=True, we return a new Categorical with the observed
categories only.
If sort=False, return a copy of self, coded with categories as
returned by .unique(), followed by any c... | python | def recode_for_groupby(c, sort, observed):
"""
Code the categories to ensure we can groupby for categoricals.
If observed=True, we return a new Categorical with the observed
categories only.
If sort=False, return a copy of self, coded with categories as
returned by .unique(), followed by any c... | [
"def",
"recode_for_groupby",
"(",
"c",
",",
"sort",
",",
"observed",
")",
":",
"# we only care about observed values",
"if",
"observed",
":",
"unique_codes",
"=",
"unique1d",
"(",
"c",
".",
"codes",
")",
"take_codes",
"=",
"unique_codes",
"[",
"unique_codes",
"!... | Code the categories to ensure we can groupby for categoricals.
If observed=True, we return a new Categorical with the observed
categories only.
If sort=False, return a copy of self, coded with categories as
returned by .unique(), followed by any categories not appearing in
the data. If sort=True, ... | [
"Code",
"the",
"categories",
"to",
"ensure",
"we",
"can",
"groupby",
"for",
"categoricals",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/categorical.py#L8-L74 | train | Recode the categories of a categorical object. | [
30522,
13366,
28667,
10244,
1035,
2005,
1035,
2177,
3762,
1006,
1039,
1010,
4066,
1010,
5159,
1007,
1024,
1000,
1000,
1000,
3642,
1996,
7236,
2000,
5676,
2057,
2064,
2177,
3762,
2005,
4937,
27203,
2015,
1012,
2065,
5159,
1027,
2995,
1010,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/functions.py | size | def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(... | python | def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(... | [
"def",
"size",
"(",
"col",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"size",
"(",
"_to_java_column",
"(",
"col",
")",
")",
")"
] | Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)] | [
"Collection",
"function",
":",
"returns",
"the",
"length",
"of",
"the",
"array",
"or",
"map",
"stored",
"in",
"the",
"column",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2472-L2483 | train | Returns the length of the array or map stored in the column. | [
30522,
13366,
2946,
1006,
8902,
1007,
1024,
1000,
1000,
1000,
3074,
3853,
1024,
5651,
1996,
3091,
1997,
1996,
9140,
2030,
4949,
8250,
1999,
1996,
5930,
1012,
1024,
11498,
2213,
8902,
1024,
2171,
1997,
5930,
2030,
3670,
1028,
1028,
1028,
1... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/missing.py | interpolate_2d | def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
dtype=None):
"""
Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# resha... | python | def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
dtype=None):
"""
Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# resha... | [
"def",
"interpolate_2d",
"(",
"values",
",",
"method",
"=",
"'pad'",
",",
"axis",
"=",
"0",
",",
"limit",
"=",
"None",
",",
"fill_value",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"transf",
"=",
"(",
"lambda",
"x",
":",
"x",
")",
"if",
"a... | Perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result. | [
"Perform",
"an",
"actual",
"interpolation",
"of",
"values",
"values",
"will",
"be",
"make",
"2",
"-",
"d",
"if",
"needed",
"fills",
"inplace",
"returns",
"the",
"result",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L408-L442 | train | Perform an actual interpolation of values on a 2 - d array. | [
30522,
13366,
6970,
18155,
3686,
1035,
14134,
1006,
5300,
1010,
4118,
1027,
1005,
11687,
1005,
1010,
8123,
1027,
1014,
1010,
5787,
1027,
3904,
1010,
6039,
1035,
3643,
1027,
3904,
1010,
26718,
18863,
1027,
3904,
1007,
1024,
1000,
1000,
1000,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/taskcontext.py | _load_from_socket | def _load_from_socket(port, auth_secret):
"""
Load data from a given socket, this is a blocking method thus only return when the socket
connection has been closed.
"""
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
# The barrier() call may block forever, so no timeout
sock.sett... | python | def _load_from_socket(port, auth_secret):
"""
Load data from a given socket, this is a blocking method thus only return when the socket
connection has been closed.
"""
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
# The barrier() call may block forever, so no timeout
sock.sett... | [
"def",
"_load_from_socket",
"(",
"port",
",",
"auth_secret",
")",
":",
"(",
"sockfile",
",",
"sock",
")",
"=",
"local_connect_and_auth",
"(",
"port",
",",
"auth_secret",
")",
"# The barrier() call may block forever, so no timeout",
"sock",
".",
"settimeout",
"(",
"N... | Load data from a given socket, this is a blocking method thus only return when the socket
connection has been closed. | [
"Load",
"data",
"from",
"a",
"given",
"socket",
"this",
"is",
"a",
"blocking",
"method",
"thus",
"only",
"return",
"when",
"the",
"socket",
"connection",
"has",
"been",
"closed",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/taskcontext.py#L102-L121 | train | Load data from a given socket. | [
30522,
13366,
1035,
7170,
1035,
2013,
1035,
22278,
1006,
3417,
1010,
8740,
2705,
1035,
3595,
1007,
1024,
1000,
1000,
1000,
7170,
2951,
2013,
1037,
2445,
22278,
1010,
2023,
2003,
1037,
10851,
4118,
2947,
2069,
2709,
2043,
1996,
22278,
4434,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/column.py | _to_seq | def _to_seq(sc, cols, converter=None):
"""
Convert a list of Column (or names) into a JVM Seq of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toSeq(c... | python | def _to_seq(sc, cols, converter=None):
"""
Convert a list of Column (or names) into a JVM Seq of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toSeq(c... | [
"def",
"_to_seq",
"(",
"sc",
",",
"cols",
",",
"converter",
"=",
"None",
")",
":",
"if",
"converter",
":",
"cols",
"=",
"[",
"converter",
"(",
"c",
")",
"for",
"c",
"in",
"cols",
"]",
"return",
"sc",
".",
"_jvm",
".",
"PythonUtils",
".",
"toSeq",
... | Convert a list of Column (or names) into a JVM Seq of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects. | [
"Convert",
"a",
"list",
"of",
"Column",
"(",
"or",
"names",
")",
"into",
"a",
"JVM",
"Seq",
"of",
"Column",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L57-L66 | train | Convert a list of Column names or names into a JVM Seq of Column objects. | [
30522,
13366,
1035,
2000,
1035,
7367,
4160,
1006,
8040,
1010,
8902,
2015,
1010,
10463,
2121,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
10463,
1037,
2862,
1997,
5930,
1006,
2030,
3415,
1007,
2046,
1037,
1046,
2615,
2213,
7367,
4160,
1997,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/functions.py | unix_timestamp | def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Convert time string with given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to Unix time stamp (in seconds), using the default timezone and the default
locale, return null if fail.
if `timestamp` is None, then it returns current t... | python | def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Convert time string with given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to Unix time stamp (in seconds), using the default timezone and the default
locale, return null if fail.
if `timestamp` is None, then it returns current t... | [
"def",
"unix_timestamp",
"(",
"timestamp",
"=",
"None",
",",
"format",
"=",
"'yyyy-MM-dd HH:mm:ss'",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"if",
"timestamp",
"is",
"None",
":",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"fu... | Convert time string with given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to Unix time stamp (in seconds), using the default timezone and the default
locale, return null if fail.
if `timestamp` is None, then it returns current timestamp.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_A... | [
"Convert",
"time",
"string",
"with",
"given",
"pattern",
"(",
"yyyy",
"-",
"MM",
"-",
"dd",
"HH",
":",
"mm",
":",
"ss",
"by",
"default",
")",
"to",
"Unix",
"time",
"stamp",
"(",
"in",
"seconds",
")",
"using",
"the",
"default",
"timezone",
"and",
"the... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1262-L1279 | train | Convert time string with given pattern ( YYYY - MM - dd HH : mm : ss to Unix time stamp. | [
30522,
13366,
19998,
1035,
2335,
15464,
2361,
1006,
2335,
15464,
2361,
1027,
3904,
1010,
4289,
1027,
1005,
1061,
2100,
2100,
2100,
1011,
3461,
1011,
20315,
1044,
2232,
1024,
3461,
1024,
7020,
1005,
1007,
1024,
1000,
1000,
1000,
10463,
2051,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/regression.py | IsotonicRegression.train | def train(cls, data, isotonic=True):
"""
Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True)
"""
... | python | def train(cls, data, isotonic=True):
"""
Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True)
"""
... | [
"def",
"train",
"(",
"cls",
",",
"data",
",",
"isotonic",
"=",
"True",
")",
":",
"boundaries",
",",
"predictions",
"=",
"callMLlibFunc",
"(",
"\"trainIsotonicRegressionModel\"",
",",
"data",
".",
"map",
"(",
"_convert_to_vector",
")",
",",
"bool",
"(",
"isot... | Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True) | [
"Train",
"an",
"isotonic",
"regression",
"model",
"on",
"the",
"given",
"data",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/regression.py#L699-L711 | train | Train an isotonic regression model on the given data. | [
30522,
13366,
3345,
1006,
18856,
2015,
1010,
2951,
1010,
11163,
25009,
1027,
2995,
1007,
1024,
1000,
1000,
1000,
3345,
2019,
11163,
25009,
26237,
2944,
2006,
1996,
2445,
2951,
1012,
1024,
11498,
2213,
2951,
1024,
16428,
2094,
1997,
1006,
38... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/panel.py | Panel.from_dict | def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects.
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orie... | python | def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects.
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orie... | [
"def",
"from_dict",
"(",
"cls",
",",
"data",
",",
"intersect",
"=",
"False",
",",
"orient",
"=",
"'items'",
",",
"dtype",
"=",
"None",
")",
":",
"from",
"collections",
"import",
"defaultdict",
"orient",
"=",
"orient",
".",
"lower",
"(",
")",
"if",
"ori... | Construct Panel from dict of DataFrame objects.
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the ... | [
"Construct",
"Panel",
"from",
"dict",
"of",
"DataFrame",
"objects",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L239-L279 | train | Construct a new Panel object from a dict of DataFrame objects. | [
30522,
13366,
2013,
1035,
4487,
6593,
1006,
18856,
2015,
1010,
2951,
1010,
29261,
1027,
6270,
1010,
16865,
1027,
1005,
5167,
1005,
1010,
26718,
18863,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
9570,
5997,
2013,
4487,
6593,
1997,
2951,
15643... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/mllib/feature.py | Word2VecModel.load | def load(cls, sc, path):
"""
Load a model from the given path.
"""
jmodel = sc._jvm.org.apache.spark.mllib.feature \
.Word2VecModel.load(sc._jsc.sc(), path)
model = sc._jvm.org.apache.spark.mllib.api.python.Word2VecModelWrapper(jmodel)
return Word2VecModel(mod... | python | def load(cls, sc, path):
"""
Load a model from the given path.
"""
jmodel = sc._jvm.org.apache.spark.mllib.feature \
.Word2VecModel.load(sc._jsc.sc(), path)
model = sc._jvm.org.apache.spark.mllib.api.python.Word2VecModelWrapper(jmodel)
return Word2VecModel(mod... | [
"def",
"load",
"(",
"cls",
",",
"sc",
",",
"path",
")",
":",
"jmodel",
"=",
"sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
"spark",
".",
"mllib",
".",
"feature",
".",
"Word2VecModel",
".",
"load",
"(",
"sc",
".",
"_jsc",
".",
"sc",
"(",
")"... | Load a model from the given path. | [
"Load",
"a",
"model",
"from",
"the",
"given",
"path",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/feature.py#L635-L642 | train | Loads a word2vec model from the given path. | [
30522,
13366,
7170,
1006,
18856,
2015,
1010,
8040,
1010,
4130,
1007,
1024,
1000,
1000,
1000,
7170,
1037,
2944,
2013,
1996,
2445,
4130,
1012,
1000,
1000,
1000,
1046,
5302,
9247,
1027,
8040,
1012,
1035,
1046,
2615,
2213,
1012,
8917,
1012,
1... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.withWatermark | def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a give... | python | def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a give... | [
"def",
"withWatermark",
"(",
"self",
",",
"eventTime",
",",
"delayThreshold",
")",
":",
"if",
"not",
"eventTime",
"or",
"type",
"(",
"eventTime",
")",
"is",
"not",
"str",
":",
"raise",
"TypeError",
"(",
"\"eventTime should be provided as a string\"",
")",
"if",
... | Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emit... | [
"Defines",
"an",
"event",
"time",
"watermark",
"for",
"this",
":",
"class",
":",
"DataFrame",
".",
"A",
"watermark",
"tracks",
"a",
"point",
"in",
"time",
"before",
"which",
"we",
"assume",
"no",
"more",
"late",
"data",
"is",
"going",
"to",
"arrive",
"."... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L434-L465 | train | Defines an event time watermark for this DataFrame. | [
30522,
13366,
2007,
5880,
10665,
1006,
2969,
1010,
2724,
7292,
1010,
8536,
2705,
21898,
11614,
1007,
1024,
1000,
1000,
1000,
11859,
2019,
2724,
2051,
2300,
10665,
2005,
2023,
1024,
2465,
1024,
1036,
2951,
15643,
1036,
1012,
1037,
2300,
1066... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/rdd.py | RDD.treeAggregate | def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, ... | python | def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, ... | [
"def",
"treeAggregate",
"(",
"self",
",",
"zeroValue",
",",
"seqOp",
",",
"combOp",
",",
"depth",
"=",
"2",
")",
":",
"if",
"depth",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Depth cannot be smaller than 1 but got %d.\"",
"%",
"depth",
")",
"if",
"self",... | Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.tr... | [
"Aggregates",
"the",
"elements",
"of",
"this",
"RDD",
"in",
"a",
"multi",
"-",
"level",
"tree",
"pattern",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L957-L1007 | train | This function aggregates the elements of this RDD in a multi - level tree. | [
30522,
13366,
3392,
8490,
17603,
5867,
1006,
2969,
1010,
5717,
10175,
5657,
1010,
7367,
4160,
7361,
30524,
1028,
1028,
1028,
5587,
1027,
23375,
1060,
1010,
1061,
1024,
1060,
1009,
1061,
1028,
1028,
1028,
16428,
2094,
1027,
8040,
1012,
5903,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/rdd.py | RDD.sortByKey | def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.p... | python | def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.p... | [
"def",
"sortByKey",
"(",
"self",
",",
"ascending",
"=",
"True",
",",
"numPartitions",
"=",
"None",
",",
"keyfunc",
"=",
"lambda",
"x",
":",
"x",
")",
":",
"if",
"numPartitions",
"is",
"None",
":",
"numPartitions",
"=",
"self",
".",
"_defaultReducePartition... | Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d'... | [
"Sorts",
"this",
"RDD",
"which",
"is",
"assumed",
"to",
"consist",
"of",
"(",
"key",
"value",
")",
"pairs",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L635-L689 | train | Sorts this RDD by key. | [
30522,
13366,
4066,
3762,
14839,
1006,
2969,
1010,
22316,
1027,
2995,
1010,
16371,
8737,
8445,
22753,
2015,
1027,
3904,
1010,
3145,
11263,
12273,
1027,
23375,
1060,
1024,
1060,
1007,
1024,
1000,
1000,
1000,
11901,
2023,
16428,
2094,
1010,
2... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/streaming.py | DataStreamReader.csv | def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFo... | python | def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFo... | [
"def",
"csv",
"(",
"self",
",",
"path",
",",
"schema",
"=",
"None",
",",
"sep",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"quote",
"=",
"None",
",",
"escape",
"=",
"None",
",",
"comment",
"=",
"None",
",",
"header",
"=",
"None",
",",
"infer... | r"""Loads a CSV file stream and returns the result as a :class:`DataFrame`.
This function will go through the input once to determine the input schema if
``inferSchema`` is enabled. To avoid going through the entire data once, disable
``inferSchema`` option or specify the schema explicitly usin... | [
"r",
"Loads",
"a",
"CSV",
"file",
"stream",
"and",
"returns",
"the",
"result",
"as",
"a",
":",
"class",
":",
"DataFrame",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L575-L705 | train | Loads a CSV file stream and returns a DataFrame. | [
30522,
13366,
20116,
2615,
1006,
2969,
1010,
4130,
1010,
8040,
28433,
1027,
3904,
1010,
19802,
1027,
3904,
1010,
17181,
1027,
3904,
1010,
14686,
1027,
3904,
1010,
4019,
1027,
3904,
1010,
7615,
1027,
3904,
1010,
20346,
1027,
3904,
1010,
1999... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
ageitgey/face_recognition | face_recognition/api.py | face_locations | def face_locations(img, number_of_times_to_upsample=1, model="hog"):
"""
Returns an array of bounding boxes of human faces in a image
:param img: An image (as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
... | python | def face_locations(img, number_of_times_to_upsample=1, model="hog"):
"""
Returns an array of bounding boxes of human faces in a image
:param img: An image (as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
... | [
"def",
"face_locations",
"(",
"img",
",",
"number_of_times_to_upsample",
"=",
"1",
",",
"model",
"=",
"\"hog\"",
")",
":",
"if",
"model",
"==",
"\"cnn\"",
":",
"return",
"[",
"_trim_css_to_bounds",
"(",
"_rect_to_css",
"(",
"face",
".",
"rect",
")",
",",
"... | Returns an array of bounding boxes of human faces in a image
:param img: An image (as a numpy array)
:param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
:param model: Which face detection model to use. "hog" is less accurate but fas... | [
"Returns",
"an",
"array",
"of",
"bounding",
"boxes",
"of",
"human",
"faces",
"in",
"a",
"image"
] | c96b010c02f15e8eeb0f71308c641179ac1f19bb | https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L108-L121 | train | Returns an array of bounding boxes of human faces in a image. | [
30522,
13366,
2227,
1035,
5269,
1006,
10047,
2290,
1010,
2193,
1035,
1997,
1035,
2335,
1035,
2000,
1035,
11139,
16613,
2571,
1027,
1015,
1010,
2944,
1027,
1000,
27589,
1000,
1007,
1024,
1000,
1000,
1000,
5651,
2019,
9140,
1997,
5391,
2075,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/streaming/dstream.py | DStream.pprint | def pprint(self, num=10):
"""
Print the first num elements of each RDD generated in this DStream.
@param num: the number of elements from the first will be printed.
"""
def takeAndPrint(time, rdd):
taken = rdd.take(num + 1)
print("------------------------... | python | def pprint(self, num=10):
"""
Print the first num elements of each RDD generated in this DStream.
@param num: the number of elements from the first will be printed.
"""
def takeAndPrint(time, rdd):
taken = rdd.take(num + 1)
print("------------------------... | [
"def",
"pprint",
"(",
"self",
",",
"num",
"=",
"10",
")",
":",
"def",
"takeAndPrint",
"(",
"time",
",",
"rdd",
")",
":",
"taken",
"=",
"rdd",
".",
"take",
"(",
"num",
"+",
"1",
")",
"print",
"(",
"\"-------------------------------------------\"",
")",
... | Print the first num elements of each RDD generated in this DStream.
@param num: the number of elements from the first will be printed. | [
"Print",
"the",
"first",
"num",
"elements",
"of",
"each",
"RDD",
"generated",
"in",
"this",
"DStream",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L166-L183 | train | Prints the first num elements of each RDD generated in this DStream. | [
30522,
13366,
4903,
6657,
2102,
1006,
2969,
1010,
16371,
2213,
1027,
2184,
1007,
1024,
1000,
1000,
1000,
6140,
1996,
2034,
16371,
2213,
3787,
1997,
2169,
16428,
2094,
7013,
1999,
2023,
16233,
25379,
1012,
1030,
11498,
2213,
16371,
2213,
102... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/types.py | _has_nulltype | def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
return _has_nulltype((dt.elementType))
elif isinstance(dt, MapType):
return _has_... | python | def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
return _has_nulltype((dt.elementType))
elif isinstance(dt, MapType):
return _has_... | [
"def",
"_has_nulltype",
"(",
"dt",
")",
":",
"if",
"isinstance",
"(",
"dt",
",",
"StructType",
")",
":",
"return",
"any",
"(",
"_has_nulltype",
"(",
"f",
".",
"dataType",
")",
"for",
"f",
"in",
"dt",
".",
"fields",
")",
"elif",
"isinstance",
"(",
"dt... | Return whether there is NullType in `dt` or not | [
"Return",
"whether",
"there",
"is",
"NullType",
"in",
"dt",
"or",
"not"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1068-L1077 | train | Return whether there is NullType in dt | [
30522,
13366,
1035,
2038,
1035,
19701,
13874,
1006,
26718,
1007,
1024,
1000,
1000,
1000,
2709,
3251,
2045,
2003,
19701,
13874,
1999,
1036,
26718,
1036,
2030,
2025,
1000,
1000,
1000,
2065,
2003,
7076,
26897,
1006,
26718,
1010,
2358,
6820,
65... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/profiler.py | BasicProfiler.profile | def profile(self, func):
""" Runs and profiles the method to_profile passed in. A profile object is returned. """
pr = cProfile.Profile()
pr.runcall(func)
st = pstats.Stats(pr)
st.stream = None # make it picklable
st.strip_dirs()
# Adds a new profile to the exis... | python | def profile(self, func):
""" Runs and profiles the method to_profile passed in. A profile object is returned. """
pr = cProfile.Profile()
pr.runcall(func)
st = pstats.Stats(pr)
st.stream = None # make it picklable
st.strip_dirs()
# Adds a new profile to the exis... | [
"def",
"profile",
"(",
"self",
",",
"func",
")",
":",
"pr",
"=",
"cProfile",
".",
"Profile",
"(",
")",
"pr",
".",
"runcall",
"(",
"func",
")",
"st",
"=",
"pstats",
".",
"Stats",
"(",
"pr",
")",
"st",
".",
"stream",
"=",
"None",
"# make it picklable... | Runs and profiles the method to_profile passed in. A profile object is returned. | [
"Runs",
"and",
"profiles",
"the",
"method",
"to_profile",
"passed",
"in",
".",
"A",
"profile",
"object",
"is",
"returned",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L158-L167 | train | Runs and profiles the method passed in. A profile object is returned. | [
30522,
13366,
6337,
1006,
2969,
1010,
4569,
2278,
1007,
1024,
1000,
1000,
1000,
3216,
1998,
17879,
1996,
4118,
2000,
1035,
6337,
2979,
1999,
1012,
1037,
6337,
4874,
2003,
2513,
1012,
1000,
1000,
1000,
10975,
1027,
18133,
3217,
8873,
2571,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index.argsort | def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy... | python | def argsort(self, *args, **kwargs):
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy... | [
"def",
"argsort",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"self",
".",
"asi8",
"if",
"result",
"is",
"None",
":",
"result",
"=",
"np",
".",
"array",
"(",
"self",
")",
"return",
"result",
".",
"argsort",
"(... | Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the... | [
"Return",
"the",
"integer",
"indices",
"that",
"would",
"sort",
"the",
"index",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4295-L4333 | train | Return the integer indices that would sort the index. | [
30522,
13366,
12098,
5620,
11589,
1006,
2969,
1010,
1008,
12098,
5620,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024,
1000,
1000,
1000,
2709,
1996,
16109,
29299,
2008,
2052,
4066,
1996,
5950,
1012,
11709,
1011,
1011,
1011,
1011,
1011,
1011... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/groupby/groupby.py | GroupBy.mean | def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B'... | python | def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B'... | [
"def",
"mean",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_groupby_func",
"(",
"'mean'",
",",
"args",
",",
"kwargs",
",",
"[",
"'numeric_only'",
"]",
")",
"try",
":",
"return",
"self",
".",
"_cython_agg_genera... | Compute mean of groups, excluding missing values.
Returns
-------
pandas.Series or pandas.DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... ... | [
"Compute",
"mean",
"of",
"groups",
"excluding",
"missing",
"values",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1102-L1155 | train | Compute the mean of the elements of the current DataFrame. | [
30522,
13366,
2812,
1006,
2969,
1010,
1008,
12098,
5620,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024,
1000,
1000,
1000,
24134,
2812,
1997,
2967,
1010,
13343,
4394,
5300,
1012,
5651,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
25462,
2015,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/clustering.py | GaussianMixtureModel.summary | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
... | python | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
... | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"hasSummary",
":",
"return",
"GaussianMixtureSummary",
"(",
"super",
"(",
"GaussianMixtureModel",
",",
"self",
")",
".",
"summary",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No training summa... | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. | [
"Gets",
"summary",
"(",
"e",
".",
"g",
".",
"cluster",
"assignments",
"cluster",
"sizes",
")",
"of",
"the",
"model",
"trained",
"on",
"the",
"training",
"set",
".",
"An",
"exception",
"is",
"thrown",
"if",
"no",
"summary",
"exists",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/clustering.py#L129-L138 | train | Returns the summary of the current model. | [
30522,
13366,
12654,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
4152,
12654,
1006,
1041,
1012,
1043,
1012,
9324,
14799,
1010,
9324,
10826,
1007,
1997,
1996,
2944,
4738,
2006,
1996,
2731,
2275,
1012,
2019,
6453,
30524,
11721,
17854,
2937,
432... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/groupby/groupby.py | GroupBy.cummin | def cummin(self, axis=0, **kwargs):
"""
Cumulative min for each group.
"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform('cummin', numeric_only=False) | python | def cummin(self, axis=0, **kwargs):
"""
Cumulative min for each group.
"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform('cummin', numeric_only=False) | [
"def",
"cummin",
"(",
"self",
",",
"axis",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"axis",
"!=",
"0",
":",
"return",
"self",
".",
"apply",
"(",
"lambda",
"x",
":",
"np",
".",
"minimum",
".",
"accumulate",
"(",
"x",
",",
"axis",
")",
... | Cumulative min for each group. | [
"Cumulative",
"min",
"for",
"each",
"group",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1964-L1971 | train | Cumulative min for each group. | [
30522,
13366,
13988,
10020,
1006,
2969,
1010,
8123,
1027,
1014,
1010,
1008,
1008,
6448,
2906,
5620,
1007,
1024,
1000,
1000,
1000,
23260,
8117,
2005,
2169,
2177,
1012,
1000,
1000,
1000,
2065,
8123,
999,
1027,
1014,
1024,
2709,
2969,
1012,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/readwriter.py | DataFrameWriter.option | def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition va... | python | def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition va... | [
"def",
"option",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"_jwrite",
"=",
"self",
".",
"_jwrite",
".",
"option",
"(",
"key",
",",
"to_str",
"(",
"value",
")",
")",
"return",
"self"
] | Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it... | [
"Adds",
"an",
"output",
"option",
"for",
"the",
"underlying",
"data",
"source",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L620-L629 | train | Adds an option to the underlying data source. | [
30522,
13366,
5724,
1006,
2969,
1010,
3145,
1010,
3643,
1007,
1024,
1000,
1000,
1000,
9909,
2019,
6434,
5724,
2005,
1996,
10318,
2951,
3120,
1012,
2017,
2064,
2275,
1996,
2206,
5724,
1006,
1055,
1007,
2005,
3015,
6764,
1024,
1008,
1036,
1... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/shuffle.py | ExternalMerger._get_spill_dir | def _get_spill_dir(self, n):
""" Choose one directory for spill by number n """
return os.path.join(self.localdirs[n % len(self.localdirs)], str(n)) | python | def _get_spill_dir(self, n):
""" Choose one directory for spill by number n """
return os.path.join(self.localdirs[n % len(self.localdirs)], str(n)) | [
"def",
"_get_spill_dir",
"(",
"self",
",",
"n",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"localdirs",
"[",
"n",
"%",
"len",
"(",
"self",
".",
"localdirs",
")",
"]",
",",
"str",
"(",
"n",
")",
")"
] | Choose one directory for spill by number n | [
"Choose",
"one",
"directory",
"for",
"spill",
"by",
"number",
"n"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L219-L221 | train | Choose one directory for spill by number n | [
30522,
13366,
1035,
2131,
1035,
14437,
1035,
16101,
1006,
2969,
1010,
1050,
1007,
1024,
1000,
1000,
1000,
5454,
2028,
14176,
2005,
14437,
2011,
2193,
1050,
1000,
1000,
1000,
2709,
9808,
1012,
4130,
1012,
3693,
1006,
2969,
1012,
2334,
4305,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/param/__init__.py | Params._resolveParam | def _resolveParam(self, param):
"""
Resolves a param and validates the ownership.
:param param: param name or the param instance, which must
belong to this Params instance
:return: resolved param instance
"""
if isinstance(param, Param):
... | python | def _resolveParam(self, param):
"""
Resolves a param and validates the ownership.
:param param: param name or the param instance, which must
belong to this Params instance
:return: resolved param instance
"""
if isinstance(param, Param):
... | [
"def",
"_resolveParam",
"(",
"self",
",",
"param",
")",
":",
"if",
"isinstance",
"(",
"param",
",",
"Param",
")",
":",
"self",
".",
"_shouldOwn",
"(",
"param",
")",
"return",
"param",
"elif",
"isinstance",
"(",
"param",
",",
"basestring",
")",
":",
"re... | Resolves a param and validates the ownership.
:param param: param name or the param instance, which must
belong to this Params instance
:return: resolved param instance | [
"Resolves",
"a",
"param",
"and",
"validates",
"the",
"ownership",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L405-L419 | train | Resolves a param and validates the ownership. | [
30522,
13366,
1035,
10663,
28689,
2213,
1006,
2969,
1010,
11498,
2213,
1007,
1024,
1000,
1000,
1000,
10663,
2015,
1037,
30524,
11498,
2213,
2171,
2030,
1996,
11498,
2213,
6013,
1010,
2029,
2442,
7141,
2000,
2023,
11498,
5244,
6013,
1024,
27... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/missing.py | interpolate_1d | def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
limit_direction='forward', limit_area=None, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each... | python | def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
limit_direction='forward', limit_area=None, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each... | [
"def",
"interpolate_1d",
"(",
"xvalues",
",",
"yvalues",
",",
"method",
"=",
"'linear'",
",",
"limit",
"=",
"None",
",",
"limit_direction",
"=",
"'forward'",
",",
"limit_area",
"=",
"None",
",",
"fill_value",
"=",
"None",
",",
"bounds_error",
"=",
"False",
... | Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
take it as an argument. | [
"Logic",
"for",
"the",
"1",
"-",
"d",
"interpolation",
".",
"The",
"result",
"should",
"be",
"1",
"-",
"d",
"inputs",
"xvalues",
"and",
"yvalues",
"will",
"each",
"be",
"1",
"-",
"d",
"arrays",
"of",
"the",
"same",
"length",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L109-L239 | train | Interpolate two 1 - d arrays of xvalues and yvalues. | [
30522,
13366,
6970,
18155,
3686,
1035,
1015,
2094,
1006,
15566,
2389,
15808,
1010,
1061,
10175,
15808,
1010,
4118,
1027,
1005,
7399,
1005,
1010,
5787,
1027,
3904,
1010,
5787,
1035,
3257,
1027,
1005,
2830,
1005,
1010,
5787,
1035,
2181,
1027,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/udf.py | UDFRegistration.register | def register(self, name, f, returnType=None):
"""Register a Python function (including lambda function) or a user-defined function
as a SQL function.
:param name: name of the user-defined function in SQL statements.
:param f: a Python function, or a user-defined function. The user-defin... | python | def register(self, name, f, returnType=None):
"""Register a Python function (including lambda function) or a user-defined function
as a SQL function.
:param name: name of the user-defined function in SQL statements.
:param f: a Python function, or a user-defined function. The user-defin... | [
"def",
"register",
"(",
"self",
",",
"name",
",",
"f",
",",
"returnType",
"=",
"None",
")",
":",
"# This is to check whether the input function is from a user-defined function or",
"# Python function.",
"if",
"hasattr",
"(",
"f",
",",
"'asNondeterministic'",
")",
":",
... | Register a Python function (including lambda function) or a user-defined function
as a SQL function.
:param name: name of the user-defined function in SQL statements.
:param f: a Python function, or a user-defined function. The user-defined function can
be either row-at-a-time or ve... | [
"Register",
"a",
"Python",
"function",
"(",
"including",
"lambda",
"function",
")",
"or",
"a",
"user",
"-",
"defined",
"function",
"as",
"a",
"SQL",
"function",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/udf.py#L232-L341 | train | Registers a Python function or a user - defined function with the specified name. | [
30522,
13366,
4236,
1006,
2969,
1010,
2171,
1010,
1042,
1010,
2709,
13874,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
4236,
1037,
18750,
3853,
1006,
2164,
23375,
3853,
1007,
2030,
1037,
5310,
1011,
4225,
3853,
2004,
1037,
29296,
3853,
1012,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/_config/config.py | config_prefix | def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
Warning: This is not thread - safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.... | python | def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
Warning: This is not thread - safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.... | [
"def",
"config_prefix",
"(",
"prefix",
")",
":",
"# Note: reset_option relies on set_option, and on key directly",
"# it does not fit in to this monkey-patching scheme",
"global",
"register_option",
",",
"get_option",
",",
"set_option",
",",
"reset_option",
"def",
"wrap",
"(",
... | contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
Warning: This is not thread - safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import pandas.... | [
"contextmanager",
"for",
"multiple",
"invocations",
"of",
"API",
"with",
"a",
"common",
"prefix"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/config.py#L696-L741 | train | Contextmanager for multiple API calls with a common prefix | [
30522,
13366,
9530,
8873,
2290,
1035,
17576,
1006,
17576,
1007,
1024,
1000,
1000,
1000,
6123,
24805,
4590,
2005,
3674,
1999,
19152,
2015,
1997,
17928,
2007,
1037,
2691,
17576,
3569,
17928,
4972,
1024,
1006,
4236,
1013,
2131,
1013,
2275,
100... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/feature.py | Word2VecModel.findSynonyms | def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(wor... | python | def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(wor... | [
"def",
"findSynonyms",
"(",
"self",
",",
"word",
",",
"num",
")",
":",
"if",
"not",
"isinstance",
"(",
"word",
",",
"basestring",
")",
":",
"word",
"=",
"_convert_to_vector",
"(",
"word",
")",
"return",
"self",
".",
"_call_java",
"(",
"\"findSynonyms\"",
... | Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity). | [
"Find",
"num",
"number",
"of",
"words",
"closest",
"in",
"similarity",
"to",
"word",
".",
"word",
"can",
"be",
"a",
"string",
"or",
"vector",
"representation",
".",
"Returns",
"a",
"dataframe",
"with",
"two",
"fields",
"word",
"and",
"similarity",
"(",
"wh... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/feature.py#L3293-L3302 | train | Find num number of words closest in similarity to word. | [
30522,
13366,
4858,
6038,
16585,
5244,
1006,
2969,
1010,
2773,
1010,
16371,
2213,
1007,
1024,
1000,
1000,
1000,
2424,
1000,
16371,
2213,
1000,
2193,
1997,
2616,
7541,
1999,
14402,
2000,
1000,
2773,
1000,
1012,
2773,
2064,
2022,
1037,
5164,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index.slice_locs | def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the... | python | def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the... | [
"def",
"slice_locs",
"(",
"self",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"step",
"=",
"None",
",",
"kind",
"=",
"None",
")",
":",
"inc",
"=",
"(",
"step",
"is",
"None",
"or",
"step",
">=",
"0",
")",
"if",
"not",
"inc",
":",
... | Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind... | [
"Compute",
"slice",
"locations",
"for",
"input",
"labels",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4824-L4913 | train | Compute slice locations for a single label. | [
30522,
13366,
14704,
1035,
8840,
6169,
1006,
2969,
1010,
2707,
1027,
3904,
1010,
2203,
1027,
3904,
1010,
3357,
1027,
3904,
1010,
2785,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
24134,
14704,
5269,
2005,
7953,
10873,
1012,
11709,
1011,
1011,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._invalid_indexer | def _invalid_indexer(self, form, key):
"""
Consistent invalid indexer message.
"""
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
... | python | def _invalid_indexer(self, form, key):
"""
Consistent invalid indexer message.
"""
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
... | [
"def",
"_invalid_indexer",
"(",
"self",
",",
"form",
",",
"key",
")",
":",
"raise",
"TypeError",
"(",
"\"cannot do {form} indexing on {klass} with these \"",
"\"indexers [{key}] of {kind}\"",
".",
"format",
"(",
"form",
"=",
"form",
",",
"klass",
"=",
"type",
"(",
... | Consistent invalid indexer message. | [
"Consistent",
"invalid",
"indexer",
"message",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3057-L3064 | train | Raises a TypeError if the key is not a valid index. | [
30522,
13366,
1035,
19528,
1035,
5950,
2121,
1006,
2969,
1010,
2433,
1010,
3145,
1007,
1024,
1000,
1000,
1000,
8335,
19528,
5950,
2121,
4471,
1012,
1000,
1000,
1000,
5333,
2828,
2121,
29165,
1006,
1000,
3685,
2079,
1063,
2433,
1065,
5950,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/rdd.py | portable_hash | def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.ve... | python | def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.ve... | [
"def",
"portable_hash",
"(",
"x",
")",
":",
"if",
"sys",
".",
"version_info",
">=",
"(",
"3",
",",
"2",
",",
"3",
")",
"and",
"'PYTHONHASHSEED'",
"not",
"in",
"os",
".",
"environ",
":",
"raise",
"Exception",
"(",
"\"Randomness of hash of string should be dis... | This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521 | [
"This",
"function",
"returns",
"consistent",
"hash",
"code",
"for",
"builtin",
"types",
"especially",
"for",
"None",
"and",
"tuple",
"with",
"None",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L78-L106 | train | This function returns consistent hash code for builtin types and tuple with None. | [
30522,
13366,
12109,
1035,
23325,
1006,
1060,
1007,
1024,
1000,
1000,
1000,
2023,
3853,
5651,
8335,
23325,
3642,
2005,
2328,
2378,
4127,
1010,
2926,
2005,
3904,
1998,
10722,
10814,
2007,
3904,
1012,
1996,
9896,
2003,
2714,
2000,
2008,
2028,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/param/__init__.py | Params.isSet | def isSet(self, param):
"""
Checks whether a param is explicitly set by user.
"""
param = self._resolveParam(param)
return param in self._paramMap | python | def isSet(self, param):
"""
Checks whether a param is explicitly set by user.
"""
param = self._resolveParam(param)
return param in self._paramMap | [
"def",
"isSet",
"(",
"self",
",",
"param",
")",
":",
"param",
"=",
"self",
".",
"_resolveParam",
"(",
"param",
")",
"return",
"param",
"in",
"self",
".",
"_paramMap"
] | Checks whether a param is explicitly set by user. | [
"Checks",
"whether",
"a",
"param",
"is",
"explicitly",
"set",
"by",
"user",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L307-L312 | train | Checks whether a param is explicitly set by user. | [
30522,
13366,
26354,
3388,
1006,
2969,
1010,
11498,
2213,
1007,
1024,
1000,
1000,
1000,
14148,
3251,
1037,
11498,
2213,
2003,
12045,
2275,
2011,
5310,
1012,
1000,
1000,
1000,
11498,
2213,
1027,
2969,
1012,
1035,
10663,
28689,
2213,
1006,
11... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/streaming/dstream.py | DStream.transformWith | def transformWith(self, func, other, keepSerializer=False):
"""
Return a new DStream in which each RDD is generated by applying a function
on each RDD of this DStream and 'other' DStream.
`func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
arguments of (`time`, `rd... | python | def transformWith(self, func, other, keepSerializer=False):
"""
Return a new DStream in which each RDD is generated by applying a function
on each RDD of this DStream and 'other' DStream.
`func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
arguments of (`time`, `rd... | [
"def",
"transformWith",
"(",
"self",
",",
"func",
",",
"other",
",",
"keepSerializer",
"=",
"False",
")",
":",
"if",
"func",
".",
"__code__",
".",
"co_argcount",
"==",
"2",
":",
"oldfunc",
"=",
"func",
"func",
"=",
"lambda",
"t",
",",
"a",
",",
"b",
... | Return a new DStream in which each RDD is generated by applying a function
on each RDD of this DStream and 'other' DStream.
`func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
arguments of (`time`, `rdd_a`, `rdd_b`) | [
"Return",
"a",
"new",
"DStream",
"in",
"which",
"each",
"RDD",
"is",
"generated",
"by",
"applying",
"a",
"function",
"on",
"each",
"RDD",
"of",
"this",
"DStream",
"and",
"other",
"DStream",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L301-L317 | train | Return a new DStream in which each RDD is generated by applying a functionon each RDD of this DStream and other DStreams. | [
30522,
13366,
10938,
24415,
1006,
2969,
1010,
4569,
2278,
1010,
2060,
1010,
7906,
11610,
28863,
1027,
6270,
1007,
1024,
1000,
1000,
1000,
2709,
1037,
2047,
16233,
25379,
1999,
2029,
2169,
16428,
2094,
2003,
7013,
2011,
11243,
1037,
3853,
20... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._assert_take_fillable | def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
"""
Internal method to handle NA filling of take.
"""
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
... | python | def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
"""
Internal method to handle NA filling of take.
"""
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
... | [
"def",
"_assert_take_fillable",
"(",
"self",
",",
"values",
",",
"indices",
",",
"allow_fill",
"=",
"True",
",",
"fill_value",
"=",
"None",
",",
"na_value",
"=",
"np",
".",
"nan",
")",
":",
"indices",
"=",
"ensure_platform_int",
"(",
"indices",
")",
"# onl... | Internal method to handle NA filling of take. | [
"Internal",
"method",
"to",
"handle",
"NA",
"filling",
"of",
"take",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L803-L822 | train | Internal method to handle NA filling of take. | [
30522,
13366,
1035,
20865,
1035,
2202,
1035,
6039,
3085,
1006,
2969,
1010,
5300,
1010,
29299,
1010,
3499,
1035,
6039,
1027,
2995,
1010,
6039,
1035,
3643,
1027,
3904,
1010,
6583,
1035,
3643,
1027,
27937,
1012,
16660,
1007,
1024,
1000,
1000,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.