Dataset columns and their observed types/sizes:

| column | type |
|---|---|
| repo | string (4 classes) |
| path | string (20 to 56 chars) |
| func_name | string (3 to 41 chars) |
| original_string | string (140 to 10k chars) |
| language | string (1 class) |
| code | string (140 to 10k chars) |
| code_tokens | list (22 to 929 items) |
| docstring | string (18 to 8.4k chars) |
| docstring_tokens | list (2 to 122 items) |
| sha | string (4 classes) |
| url | string (108 to 167 chars) |
| partition | string (1 class) |
| summary | string (17 to 285 chars) |
| input_ids | list (502 items) |
| token_type_ids | list (502 items) |
| attention_mask | list (502 items) |
| labels | list (502 items) |

In the example records below, `original_string` (a verbatim duplicate of `code`) and the token/ID columns (`code_tokens`, `docstring_tokens`, `input_ids`, `token_type_ids`, `attention_mask`, `labels`) appear only as truncated previews in the source, so each record keeps the readable fields: code, docstring (where it extends past the code preview), summary, sha, url, and partition.
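
The fixed-length `input_ids`, `token_type_ids`, `attention_mask`, and `labels` columns (502 positions each, with `-100` filling unused label positions) look like pre-tokenized inputs for a sequence model that learns to produce the `summary` from the `code`. The sketch below is an assumption about how such a record could be batched and scored, not something the table states; the toy values and the vocabulary size are illustrative only.

```python
# Hypothetical sketch: batch one record and compute a loss that skips the
# -100 label positions, which matches CrossEntropyLoss's default ignore_index.
import torch
from torch.nn import CrossEntropyLoss

record = {
    "input_ids":      [30522, 13366, 1035] + [0] * 499,   # 502 positions
    "token_type_ids": [0] * 502,
    "attention_mask": [1, 1, 1] + [0] * 499,
    "labels":         [2709, 1037] + [-100] * 500,         # -100 = ignored
}
batch = {name: torch.tensor([values]) for name, values in record.items()}

vocab_size = 30522
logits = torch.zeros(1, 502, vocab_size)  # stand-in for a model's output

loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(logits.view(-1, vocab_size), batch["labels"].view(-1))
print(float(loss))  # only the two real label positions contribute
```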

**DataFrame._jseq** (apache/spark, python/pyspark/sql/dataframe.py, python, train)

    def _jseq(self, cols, converter=None):
        """Return a JVM Seq of Columns from a list of Column or names"""
        return _to_seq(self.sql_ctx._sc, cols, converter)

- summary: Return a JVM Seq of Columns from a list of Column names
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1097-L1099

**last** (apache/spark, python/pyspark/sql/functions.py, python, train)

    def last(col, ignorenulls=False):
        """Aggregate function: returns the last value in a group.
        The function by default returns the last values it sees. It will return the last non-null
        value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
        .. note:: The function is ...

- docstring (truncated): Aggregate function: returns the last value in a group. The function by default returns the last values it sees. It will return the last non-null value it sees when ignoreNulls is set to true. If all values are null, then null is returned. .. note:: The function is non-deterministic because its results dep...
- summary: Aggregate function that returns the last value in a group.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L527-L538
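
A minimal usage sketch for the `last` aggregate in this record; the toy DataFrame and the local SparkSession are assumptions, not part of the dataset row.

```python
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master("local[1]").appName("last-demo").getOrCreate()
df = spark.createDataFrame([("a", 1), ("a", None), ("b", 3)], ["key", "value"])

# Default: takes the last value seen per group, which may be null.
# With ignorenulls=True it skips nulls, as the docstring above describes.
df.groupBy("key").agg(
    F.last("value").alias("last_any"),
    F.last("value", ignorenulls=True).alias("last_non_null"),
).show()
```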

**GroupBy._fill** (pandas-dev/pandas, pandas/core/groupby/groupby.py, python, train)

    def _fill(self, direction, limit=None):
        """
        Shared function for `pad` and `backfill` to call Cython method.
        Parameters
        ----------
        direction : {'ffill', 'bfill'}
            Direction passed to underlying Cython function. `bfill` will cause
            values to be filled backwar...

- docstring (truncated): Shared function for `pad` and `backfill` to call Cython method. Parameters ---------- direction : {'ffill', 'bfill'} Direction passed to underlying Cython function. `bfill` will cause values to be filled backwards. `ffill` and any other values will default to...
- summary: Returns a Series or DataFrame with filled values for the specified order.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1474-L1505

**_trim_front** (pandas-dev/pandas, pandas/core/indexes/base.py, python, train)

    def _trim_front(strings):
        """
        Trims zeros and decimal points.
        """
        trimmed = strings
        while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):
            trimmed = [x[1:] for x in trimmed]
        return trimmed

- summary: Trims zeros and decimal points.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5393-L5400

**choose_jira_assignee** (apache/spark, dev/merge_spark_pr.py, python, train)

    def choose_jira_assignee(issue, asf_jira):
        """
        Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
        including the original reporter and all commentors
        """
        while True:
            try:
                reporter = issue.fields.reporter
                commentors = map(lambda x:...

- summary: Prompt the user to assign the issue to in jira.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/dev/merge_spark_pr.py#L325-L362

**DataFrameReader.options** (apache/spark, python/pyspark/sql/readwriter.py, python, train)

    def options(self, **options):
        """Adds input options for the underlying data source.
        You can set the following option(s) for reading files:
            * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
              in the JSON/CSV datasources or partition values...

- docstring (truncated): Adds input options for the underlying data source. You can set the following option(s) for reading files: * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps in the JSON/CSV datasources or partition values. If it isn't set, it use...
- summary: Adds input options for the underlying data source.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L128-L138
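
A small usage sketch for `DataFrameReader.options`; the file path and option names (`header`, `inferSchema`) are illustrative assumptions rather than values taken from the record.

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("options-demo").getOrCreate()

# Each key/value pair is forwarded to the underlying data source, as the
# method above does by looping over the provided options.
df = (
    spark.read
    .options(header="true", inferSchema="true", timeZone="UTC")
    .csv("people.csv")  # hypothetical input file
)
df.printSchema()
```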

**Index.rename** (pandas-dev/pandas, pandas/core/indexes/base.py, python, train)

    def rename(self, name, inplace=False):
        """
        Alter Index or MultiIndex name.
        Able to set new names without level. Defaults to returning new index.
        Length of names must match number of levels in MultiIndex.
        Parameters
        ----------
        name : label or list of labels
        ...

- docstring (truncated): Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : boolean, default F...
- summary: A method to set the names of the current index or MultiIndex.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1342-L1387
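
A short pandas sketch of the `rename` behaviour described in this record: by default a renamed copy is returned, while `inplace=True` mutates the existing Index. The example data is assumed.

```python
import pandas as pd

idx = pd.Index(["a", "b", "c"], name="letters")

# Returns a new Index with the new name; the original is untouched.
renamed = idx.rename("chars")
print(idx.name, renamed.name)   # letters chars

# With inplace=True the name is changed on idx itself and None is returned.
idx.rename("chars", inplace=True)
print(idx.name)                 # chars
```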

**Index.get_indexer_for** (pandas-dev/pandas, pandas/core/indexes/base.py, python, train)

    def get_indexer_for(self, target, **kwargs):
        """
        Guaranteed return of an indexer even when non-unique.
        This dispatches to get_indexer or get_indexer_nonunique
        as appropriate.
        """
        if self.is_unique:
            return self.get_indexer(target, **kwargs)
        indexer, _ ...

- summary: Returns an indexer for the given target.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4440-L4450

**UserDefinedType._cachedSqlType** (apache/spark, python/pyspark/sql/types.py, python, train)

    def _cachedSqlType(cls):
        """
        Cache the sqlType() into class, because it's heavy used in `toInternal`.
        """
        if not hasattr(cls, "_cached_sql_type"):
            cls._cached_sql_type = cls.sqlType()
        return cls._cached_sql_type

- summary: Cache the sqlType into class because it s heavy used in toInternal.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L675-L681

**is_sequence** (pandas-dev/pandas, pandas/core/dtypes/inference.py, python, train)

    def is_sequence(obj):
        """
        Check if the object is a sequence of objects.
        String types are not included as sequences here.
        Parameters
        ----------
        obj : The object to check
        Returns
        -------
        is_sequence : bool
            Whether `obj` is a sequence of objects.
        Examples
        --------
        ...

- docstring (truncated): Check if the object is a sequence of objects. String types are not included as sequences here. Parameters ---------- obj : The object to check Returns ------- is_sequence : bool Whether `obj` is a sequence of objects. Examples -------- >>> l = [1, 2, 3] >>> >>>...
- summary: Checks if the object is a sequence of objects.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/inference.py#L462-L491

**_cast_values_for_fillna** (pandas-dev/pandas, pandas/core/missing.py, python, train)

    def _cast_values_for_fillna(values, dtype):
        """
        Cast values to a dtype that algos.pad and algos.backfill can handle.
        """
        # TODO: for int-dtypes we make a copy, but for everything else this
        # alters the values in-place. Is this intentional?
        if (is_datetime64_dtype(dtype) or is_datetime64tz_dty...

- summary: Cast values to a dtype that algos. pad and algos. backfill can handle.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L445-L460

**Index.is_** (pandas-dev/pandas, pandas/core/indexes/base.py, python, train)

    def is_(self, other):
        """
        More flexible, faster check like ``is`` but that works through views.
        Note: this is *not* the same as ``Index.identical()``, which checks
        that metadata is also the same.
        Parameters
        ----------
        other : object
            other object to c...

- docstring (truncated): More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object other object to compare against. Returns -...
- summary: Returns True if this object is the same underlying data as other.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L614-L632

**Rolling._validate_freq** (pandas-dev/pandas, pandas/core/window.py, python, train)

    def _validate_freq(self):
        """
        Validate & return window frequency.
        """
        from pandas.tseries.frequencies import to_offset
        try:
            return to_offset(self.window)
        except (TypeError, ValueError):
            raise ValueError("passed window {0} is not "
                             ...

- summary: Validate & return window frequency.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L1611-L1621

**IndexedRowMatrix.toBlockMatrix** (apache/spark, python/pyspark/mllib/linalg/distributed.py, python, train)

    def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024):
        """
        Convert this matrix to a BlockMatrix.
        :param rowsPerBlock: Number of rows that make up each block.
                             The blocks forming the final rows are not
                             required to have the given nu...

- docstring (truncated): Convert this matrix to a BlockMatrix. :param rowsPerBlock: Number of rows that make up each block. The blocks forming the final rows are not required to have the given number of rows. :param colsPerBlock: Number of columns that make up each bloc...
- summary: Convert this matrix to a BlockMatrix.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/distributed.py#L631-L658

**Index.insert** (pandas-dev/pandas, pandas/core/indexes/base.py, python, train)

    def insert(self, loc, item):
        """
        Make new Index inserting new item at location.
        Follows Python list.append semantics for negative values.
        Parameters
        ----------
        loc : int
        item : object
        Returns
        -------
        new_index : Index
        """
        ...

- summary: Make new Index inserting new item at location.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4925-L4943
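
A quick sketch of `Index.insert`, including the negative-location semantics the docstring mentions; the sample values are assumptions.

```python
import pandas as pd

idx = pd.Index([1, 2, 4])

# Insert before position 2, returning a new Index; the original is unchanged.
print(idx.insert(2, 3))    # [1, 2, 3, 4]

# Negative locations follow Python list semantics: -1 inserts before the last item.
print(idx.insert(-1, 3))   # [1, 2, 3, 4]
```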

**ProfilerCollector.dump_profiles** (apache/spark, python/pyspark/profiler.py, python, train)

    def dump_profiles(self, path):
        """ Dump the profile stats into directory `path` """
        for id, profiler, _ in self.profilers:
            profiler.dump(id, path)
        self.profilers = []

- summary: Dump the profile stats into a directory path.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/profiler.py#L53-L57

**DataFrame.unionByName** (apache/spark, python/pyspark/sql/dataframe.py, python, train)

    def unionByName(self, other):
        """ Returns a new :class:`DataFrame` containing union of rows in this and another frame.
        This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
        union (that does deduplication of elements), use this function followed by :func:`dis...

- docstring (truncated): Returns a new :class:`DataFrame` containing union of rows in this and another frame. This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set union (that does deduplication of elements), use this function followed by :func:`distinct`. The difference between th...
- summary: Returns a new DataFrame containing union of rows in this and another DataFrame.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1466-L1485
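
A usage sketch contrasting `unionByName` with positional union; the two toy DataFrames and the local SparkSession are assumptions.

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("union-demo").getOrCreate()

df1 = spark.createDataFrame([(1, "x")], ["id", "label"])
df2 = spark.createDataFrame([("y", 2)], ["label", "id"])

# Columns are resolved by name, not by position, so the different column
# order in df2 is handled correctly.
df1.unionByName(df2).show()
```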

**Statistics.corr** (apache/spark, python/pyspark/mllib/stat/_statistics.py, python, train)

    def corr(x, y=None, method=None):
        """
        Compute the correlation (matrix) for the input RDD(s) using the
        specified method.
        Methods currently supported: I{pearson (default), spearman}.
        If a single RDD of Vectors is passed in, a correlation matrix
        comparing the columns in the...

- docstring (truncated): Compute the correlation (matrix) for the input RDD(s) using the specified method. Methods currently supported: I{pearson (default), spearman}. If a single RDD of Vectors is passed in, a correlation matrix comparing the columns in the input RDD is returned. Use C{method=} to spec...
- summary: Compute the correlation matrix for the input RDDs x and y.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/stat/_statistics.py#L97-L157
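
A sketch of the two calling styles the docstring describes: a single RDD of Vectors yields a correlation matrix, while two RDDs of doubles yield one coefficient. The data and the local SparkContext are assumptions.

```python
from pyspark.sql import SparkSession
from pyspark.mllib.stat import Statistics
from pyspark.mllib.linalg import Vectors

sc = SparkSession.builder.master("local[1]").appName("corr-demo").getOrCreate().sparkContext

# One RDD of Vectors: pairwise correlation matrix of the columns.
rows = sc.parallelize([Vectors.dense(1.0, 2.0), Vectors.dense(2.0, 4.0), Vectors.dense(3.0, 6.5)])
print(Statistics.corr(rows, method="pearson"))

# Two RDDs of doubles: a single correlation value.
x = sc.parallelize([1.0, 2.0, 3.0])
y = sc.parallelize([9.0, 4.0, 1.0])
print(Statistics.corr(x, y, method="spearman"))
```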

**_siftdown_max** (apache/spark, python/pyspark/heapq3.py, python, train)

    def _siftdown_max(heap, startpos, pos):
        'Maxheap variant of _siftdown'
        newitem = heap[pos]
        # Follow the path to the root, moving parents down until finding a place
        # newitem fits.
        while pos > startpos:
            parentpos = (pos - 1) >> 1
            parent = heap[parentpos]
            if parent < newitem:
                ...

- summary: Maxheap variant of _siftdown
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L559-L572

**_bin_op** (apache/spark, python/pyspark/sql/column.py, python, train)

    def _bin_op(name, doc="binary operator"):
        """ Create a method for given binary operator
        """
        def _(self, other):
            jc = other._jc if isinstance(other, Column) else other
            njc = getattr(self._jc, name)(jc)
            return Column(njc)
        _.__doc__ = doc
        return _

- summary: Create a method that returns a new object for the given binary operator.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L110-L118

**get_group_index_sorter** (pandas-dev/pandas, pandas/core/sorting.py, python, train)

    def get_group_index_sorter(group_index, ngroups):
        """
        algos.groupsort_indexer implements `counting sort` and it is at least
        O(ngroups), where
            ngroups = prod(shape)
            shape = map(len, keys)
        that is, linear in the number of combinations (cartesian product) of unique
        values of groupby key...

- docstring (truncated): algos.groupsort_indexer implements `counting sort` and it is at least O(ngroups), where ngroups = prod(shape) shape = map(len, keys) that is, linear in the number of combinations (cartesian product) of unique values of groupby keys. This can be huge when doing multi-key groupby. np.argso...
- summary: This function returns the index of the unique items in the group_index.
- sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
- url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sorting.py#L338-L362

**HashingTF.transform** (apache/spark, python/pyspark/mllib/feature.py, python, train)

    def transform(self, document):
        """
        Transforms the input document (list of terms) to term frequency
        vectors, or transform the RDD of document to RDD of term
        frequency vectors.
        """
        if isinstance(document, RDD):
            return document.map(self.transform)
        freq =...

- summary: Transforms the input document to term frequency vectors or transform the RDD of document to RDD of term frequency vectors or transform the input document to RDD of term frequency vectors or transform the RDD of document to RDD of term frequency vectors.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/feature.py#L473-L486
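
A minimal sketch of `HashingTF.transform` on a single term list and on an RDD of term lists; the feature count, documents, and local SparkContext are assumptions.

```python
from pyspark.sql import SparkSession
from pyspark.mllib.feature import HashingTF

sc = SparkSession.builder.master("local[1]").appName("tf-demo").getOrCreate().sparkContext

tf = HashingTF(numFeatures=16)

# A single document (list of terms) becomes one sparse term-frequency vector.
print(tf.transform(["spark", "python", "spark"]))

# An RDD of documents is mapped element-wise, as the code above shows.
docs = sc.parallelize([["spark", "python"], ["pandas", "python", "python"]])
print(tf.transform(docs).collect())
```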

**LinearDataGenerator.generateLinearRDD** (apache/spark, python/pyspark/mllib/util.py, python, train)

    def generateLinearRDD(sc, nexamples, nfeatures, eps,
                          nParts=2, intercept=0.0):
        """
        Generate an RDD of LabeledPoints.
        """
        return callMLlibFunc(
            "generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
            float(eps), int(nParts), float(...

- summary: Generate an RDD of LabeledPoints.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/util.py#L494-L501

**ExternalMerger._spill** (apache/spark, python/pyspark/shuffle.py, python, train)

    def _spill(self):
        """
        dump already partitioned data into disks.
        It will dump the data in batch for better performance.
        """
        global MemoryBytesSpilled, DiskBytesSpilled
        path = self._get_spill_dir(self.spills)
        if not os.path.exists(path):
            os.makedirs(pat...

- summary: This function will dump already partitioned data into disks. It will dump the data into the disks and the memory used by the memory.
- sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
- url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L291-L337

**load_tf_weights_in_openai_gpt** (huggingface/pytorch-pretrained-BERT, pytorch_pretrained_bert/modeling_openai.py, python, train)

    def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
        """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
        """
        import re
        import numpy as np
        print("Loading weights...")
        names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", ...

- summary: Load tf pre - trained weights in a pytorch model.
- sha: b832d5bb8a6dfc5965015b828e577677eace601e
- url: https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L46-L113
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.alias | def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1... | python | def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1... | [
"def",
"alias",
"(",
"self",
",",
"alias",
")",
":",
"assert",
"isinstance",
"(",
"alias",
",",
"basestring",
")",
",",
"\"alias should be a string\"",
"return",
"DataFrame",
"(",
"getattr",
"(",
"self",
".",
"_jdf",
",",
"\"as\"",
")",
"(",
"alias",
")",
... | Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") ==... | [
"Returns",
"a",
"new",
":",
"class",
":",
"DataFrame",
"with",
"an",
"alias",
"set",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L958-L971 | train | Returns a new DataFrame with an alias set. | [
30522,
13366,
14593,
1006,
2969,
1010,
14593,
1007,
1024,
1000,
1000,
1000,
5651,
1037,
2047,
1024,
2465,
1024,
1036,
2951,
15643,
1036,
2007,
2019,
14593,
2275,
1012,
1024,
11498,
2213,
14593,
1024,
5164,
1010,
2019,
14593,
2171,
2000,
202... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
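A minimal usage sketch of DataFrame.alias for a self-join, assuming a running SparkSession named spark (the data is illustrative):

    from pyspark.sql.functions import col

    df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], ["age", "name"])
    df_as1 = df.alias("df_as1")
    df_as2 = df.alias("df_as2")
    # the aliases disambiguate the otherwise identical column names
    joined = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), "inner")
    joined.select("df_as1.name", "df_as2.age").show()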
pandas-dev/pandas | pandas/io/parquet.py | to_parquet | def to_parquet(df, path, engine='auto', compression='snappy', index=None,
partition_cols=None, **kwargs):
"""
Write a DataFrame to the parquet format.
Parameters
----------
path : str
File path or Root Directory path. Will be used as Root Directory path
while writing ... | python | def to_parquet(df, path, engine='auto', compression='snappy', index=None,
partition_cols=None, **kwargs):
"""
Write a DataFrame to the parquet format.
Parameters
----------
path : str
File path or Root Directory path. Will be used as Root Directory path
while writing ... | [
"def",
"to_parquet",
"(",
"df",
",",
"path",
",",
"engine",
"=",
"'auto'",
",",
"compression",
"=",
"'snappy'",
",",
"index",
"=",
"None",
",",
"partition_cols",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"impl",
"=",
"get_engine",
"(",
"engine",
... | Write a DataFrame to the parquet format.
Parameters
----------
path : str
File path or Root Directory path. Will be used as Root Directory path
while writing a partitioned dataset.
.. versionchanged:: 0.24.0
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
P... | [
"Write",
"a",
"DataFrame",
"to",
"the",
"parquet",
"format",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parquet.py#L213-L251 | train | Write a DataFrame to the Parquet format. | [
30522,
13366,
2000,
1035,
11968,
12647,
1006,
1040,
2546,
1010,
4130,
1010,
3194,
1027,
1005,
8285,
1005,
1010,
13379,
1027,
1005,
10245,
7685,
1005,
1010,
5950,
1027,
3904,
1010,
13571,
1035,
8902,
2015,
1027,
3904,
1010,
1008,
1008,
6448,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
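A minimal sketch of the DataFrame.to_parquet call documented above, assuming pandas >= 0.24 with a Parquet engine (pyarrow or fastparquet) installed; the file name is illustrative:

    import pandas as pd

    df = pd.DataFrame({"year": [2019, 2019, 2020], "value": [1.0, 2.0, 3.0]})
    # writes a directory dataset partitioned by the 'year' column
    df.to_parquet("values.parquet", engine="auto", compression="snappy",
                  index=None, partition_cols=["year"])
    roundtrip = pd.read_parquet("values.parquet")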
pandas-dev/pandas | pandas/core/internals/blocks.py | Block.is_categorical_astype | def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if dtype is Categorical or dtype is CategoricalDtype:
# this is a pd.Categorical, but is not
# a valid type for ast... | python | def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if dtype is Categorical or dtype is CategoricalDtype:
# this is a pd.Categorical, but is not
# a valid type for ast... | [
"def",
"is_categorical_astype",
"(",
"self",
",",
"dtype",
")",
":",
"if",
"dtype",
"is",
"Categorical",
"or",
"dtype",
"is",
"CategoricalDtype",
":",
"# this is a pd.Categorical, but is not",
"# a valid type for astypeing",
"raise",
"TypeError",
"(",
"\"invalid type {0} ... | validate that we have a astypeable to categorical,
returns a boolean if we are a categorical | [
"validate",
"that",
"we",
"have",
"a",
"astypeable",
"to",
"categorical",
"returns",
"a",
"boolean",
"if",
"we",
"are",
"a",
"categorical"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L145-L158 | train | validate that we have a astypeable to categorical return a boolean if we are a categorical | [
30522,
13366,
2003,
1035,
4937,
27203,
1035,
2004,
13874,
1006,
2969,
1010,
26718,
18863,
1007,
1024,
1000,
1000,
1000,
9398,
3686,
2008,
2057,
2031,
1037,
2004,
13874,
3085,
2000,
4937,
27203,
1010,
5651,
1037,
22017,
20898,
2065,
2057,
20... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._maybe_cast_indexer | def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index, then try to cast
to an int if equivalent.
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
... | python | def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index, then try to cast
to an int if equivalent.
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
... | [
"def",
"_maybe_cast_indexer",
"(",
"self",
",",
"key",
")",
":",
"if",
"is_float",
"(",
"key",
")",
"and",
"not",
"self",
".",
"is_floating",
"(",
")",
":",
"try",
":",
"ckey",
"=",
"int",
"(",
"key",
")",
"if",
"ckey",
"==",
"key",
":",
"key",
"... | If we have a float key and are not a floating index, then try to cast
to an int if equivalent. | [
"If",
"we",
"have",
"a",
"float",
"key",
"and",
"are",
"not",
"a",
"floating",
"index",
"then",
"try",
"to",
"cast",
"to",
"an",
"int",
"if",
"equivalent",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4675-L4688 | train | Try to cast the key to an int if equivalent. | [
30522,
13366,
1035,
2672,
1035,
3459,
1035,
5950,
2121,
1006,
2969,
1010,
3145,
1007,
1024,
1000,
1000,
1000,
2065,
2057,
2031,
1037,
14257,
3145,
1998,
2024,
2025,
1037,
8274,
5950,
1010,
2059,
3046,
2000,
3459,
2000,
2019,
20014,
2065,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.localCheckpoint | def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this DataFrame, which is especially useful in iterative
algorithms where the plan may grow exponentially. Local checkpoints are stored in th... | python | def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this DataFrame, which is especially useful in iterative
algorithms where the plan may grow exponentially. Local checkpoints are stored in th... | [
"def",
"localCheckpoint",
"(",
"self",
",",
"eager",
"=",
"True",
")",
":",
"jdf",
"=",
"self",
".",
"_jdf",
".",
"localCheckpoint",
"(",
"eager",
")",
"return",
"DataFrame",
"(",
"jdf",
",",
"self",
".",
"sql_ctx",
")"
] | Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this DataFrame, which is especially useful in iterative
algorithms where the plan may grow exponentially. Local checkpoints are stored in the
executors using the caching subsystem an... | [
"Returns",
"a",
"locally",
"checkpointed",
"version",
"of",
"this",
"Dataset",
".",
"Checkpointing",
"can",
"be",
"used",
"to",
"truncate",
"the",
"logical",
"plan",
"of",
"this",
"DataFrame",
"which",
"is",
"especially",
"useful",
"in",
"iterative",
"algorithms... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L420-L431 | train | Returns a locally checkpointed version of this Dataset. | [
30522,
13366,
2334,
5403,
3600,
8400,
1006,
2969,
1010,
9461,
1027,
2995,
1007,
1024,
1000,
1000,
1000,
5651,
1037,
7246,
26520,
2098,
2544,
1997,
2023,
2951,
13462,
1012,
26520,
2075,
2064,
2022,
2109,
2000,
19817,
4609,
16280,
1996,
11177... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
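A sketch of how localCheckpoint is typically used to keep an iterative plan from growing without bound, assuming a running SparkSession named spark:

    df = spark.range(1000)
    for i in range(10):
        df = df.withColumn("id", df["id"] + 1)
        if i % 5 == 0:
            # truncates the logical plan; stored on executors, so not fault tolerant
            df = df.localCheckpoint(eager=True)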
pandas-dev/pandas | pandas/core/internals/blocks.py | Block._check_ndim | def _check_ndim(self, values, ndim):
"""
ndim inference and validation.
Infers ndim from 'values' if not provided to __init__.
Validates that values.ndim and ndim are consistent if and only if
the class variable '_validate_ndim' is True.
Parameters
----------
... | python | def _check_ndim(self, values, ndim):
"""
ndim inference and validation.
Infers ndim from 'values' if not provided to __init__.
Validates that values.ndim and ndim are consistent if and only if
the class variable '_validate_ndim' is True.
Parameters
----------
... | [
"def",
"_check_ndim",
"(",
"self",
",",
"values",
",",
"ndim",
")",
":",
"if",
"ndim",
"is",
"None",
":",
"ndim",
"=",
"values",
".",
"ndim",
"if",
"self",
".",
"_validate_ndim",
"and",
"values",
".",
"ndim",
"!=",
"ndim",
":",
"msg",
"=",
"(",
"\"... | ndim inference and validation.
Infers ndim from 'values' if not provided to __init__.
Validates that values.ndim and ndim are consistent if and only if
the class variable '_validate_ndim' is True.
Parameters
----------
values : array-like
ndim : int or None
... | [
"ndim",
"inference",
"and",
"validation",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L87-L116 | train | Infer ndim from the values if not provided and validate that it is consistent with values.ndim. | [
30522,
13366,
1035,
4638,
1035,
1050,
22172,
1006,
2969,
1010,
5300,
1010,
1050,
22172,
1007,
1024,
1000,
1000,
1000,
1050,
22172,
28937,
1998,
27354,
1012,
1999,
24396,
1050,
22172,
2013,
30524,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
10... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._get_unique_index | def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropn... | python | def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropn... | [
"def",
"_get_unique_index",
"(",
"self",
",",
"dropna",
"=",
"False",
")",
":",
"if",
"self",
".",
"is_unique",
"and",
"not",
"dropna",
":",
"return",
"self",
"values",
"=",
"self",
".",
"values",
"if",
"not",
"self",
".",
"is_unique",
":",
"values",
"... | Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index | [
"Returns",
"an",
"index",
"containing",
"unique",
"values",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2164-L2192 | train | Returns an index containing unique values. | [
30522,
13366,
1035,
2131,
1035,
4310,
1035,
5950,
1006,
2969,
1010,
4530,
2532,
1027,
6270,
1007,
1024,
1000,
1000,
1000,
5651,
2019,
5950,
4820,
4310,
5300,
1012,
11709,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
4530,
2... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/types.py | _create_converter | def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif i... | python | def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif i... | [
"def",
"_create_converter",
"(",
"dataType",
")",
":",
"if",
"not",
"_need_converter",
"(",
"dataType",
")",
":",
"return",
"lambda",
"x",
":",
"x",
"if",
"isinstance",
"(",
"dataType",
",",
"ArrayType",
")",
":",
"conv",
"=",
"_create_converter",
"(",
"da... | Create a converter to drop the names of fields in obj | [
"Create",
"a",
"converter",
"to",
"drop",
"the",
"names",
"of",
"fields",
"in",
"obj"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1133-L1180 | train | Create a converter to drop the names of fields in obj | [
30522,
13366,
1035,
3443,
1035,
10463,
2121,
1006,
2951,
13874,
1007,
1024,
1000,
1000,
1000,
3443,
1037,
10463,
2121,
2000,
4530,
1996,
3415,
1997,
4249,
1999,
27885,
3501,
1000,
1000,
1000,
2065,
2025,
1035,
2342,
1035,
10463,
2121,
1006,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/reshape/melt.py | wide_to_long | def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You ... | python | def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You ... | [
"def",
"wide_to_long",
"(",
"df",
",",
"stubnames",
",",
"i",
",",
"j",
",",
"sep",
"=",
"\"\"",
",",
"suffix",
"=",
"r'\\d+'",
")",
":",
"def",
"get_var_names",
"(",
"df",
",",
"stub",
",",
"sep",
",",
"suffix",
")",
":",
"regex",
"=",
"r'^{stub}{... | r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You specify what you want to call this suffix in the resulting long fo... | [
"r",
"Wide",
"panel",
"to",
"long",
"format",
".",
"Less",
"flexible",
"but",
"more",
"user",
"-",
"friendly",
"than",
"melt",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/melt.py#L178-L458 | train | Reshapes a wide-format DataFrame into long format, using i as the id variable and j as the name of the sub-observation variable. | [
30522,
13366,
2898,
1035,
2000,
1035,
2146,
1006,
1040,
2546,
1010,
24646,
24700,
14074,
2015,
1010,
1045,
1010,
1046,
1010,
19802,
1027,
1000,
1000,
1010,
16809,
1027,
1054,
1005,
1032,
1040,
1009,
1005,
1007,
1024,
1054,
1000,
1000,
1000,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
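A minimal sketch of the wide_to_long reshape described above; the data follows the pattern in the docstring (stubs A and B with year suffixes):

    import pandas as pd

    df = pd.DataFrame({"A1970": ["a", "b", "c"],
                       "A1980": ["d", "e", "f"],
                       "B1970": [2.5, 1.2, 0.7],
                       "B1980": [3.2, 1.3, 0.1]})
    df["id"] = df.index
    long_df = pd.wide_to_long(df, stubnames=["A", "B"], i="id", j="year")
    # long_df is indexed by (id, year) and has columns A and B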
apache/spark | python/pyspark/sql/functions.py | approx_count_distinct | def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_coun... | python | def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_coun... | [
"def",
"approx_count_distinct",
"(",
"col",
",",
"rsd",
"=",
"None",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"if",
"rsd",
"is",
"None",
":",
"jc",
"=",
"sc",
".",
"_jvm",
".",
"functions",
".",
"approx_count_distinct",
"(",
"_to_... | Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collec... | [
"Aggregate",
"function",
":",
"returns",
"a",
"new",
":",
"class",
":",
"Column",
"for",
"approximate",
"distinct",
"count",
"of",
"column",
"col",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L314-L329 | train | Aggregate function that returns a new column for approximate distinct count of
column col. | [
30522,
13366,
22480,
1035,
4175,
1035,
5664,
1006,
8902,
1010,
12667,
2094,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
9572,
3853,
1024,
5651,
1037,
2047,
1024,
2465,
1024,
1036,
5930,
1036,
2005,
15796,
5664,
4175,
1997,
5930,
1036,
8902,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
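A minimal usage sketch of approx_count_distinct, assuming a running SparkSession named spark:

    from pyspark.sql.functions import approx_count_distinct

    df = spark.createDataFrame([(2, "Alice"), (5, "Bob"), (5, "Bob")], ["age", "name"])
    df.agg(approx_count_distinct("age", rsd=0.05).alias("distinct_ages")).show()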
apache/spark | python/pyspark/rdd.py | RDD.aggregateByKey | def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
... | python | def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
... | [
"def",
"aggregateByKey",
"(",
"self",
",",
"zeroValue",
",",
"seqFunc",
",",
"combFunc",
",",
"numPartitions",
"=",
"None",
",",
"partitionFunc",
"=",
"portable_hash",
")",
":",
"def",
"createZero",
"(",
")",
":",
"return",
"copy",
".",
"deepcopy",
"(",
"z... | Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's, The former oper... | [
"Aggregate",
"the",
"values",
"of",
"each",
"key",
"using",
"given",
"combine",
"functions",
"and",
"a",
"neutral",
"zero",
"value",
".",
"This",
"function",
"can",
"return",
"a",
"different",
"result",
"type",
"U",
"than",
"the",
"type",
"of",
"the",
"val... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1876-L1891 | train | Aggregate the values of each key using given combine functions and a neutral
zero value. | [
30522,
13366,
9572,
3762,
14839,
1006,
2969,
1010,
5717,
10175,
5657,
1010,
7367,
4160,
11263,
12273,
1010,
22863,
11263,
12273,
1010,
16371,
8737,
8445,
22753,
2015,
1027,
3904,
1010,
13571,
11263,
12273,
1027,
12109,
1035,
23325,
1007,
1024... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
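A sketch of aggregateByKey computing a per-key mean, assuming a SparkContext named sc; the accumulator type U here is a (sum, count) pair, deliberately different from the value type V:

    rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 3)])
    seq = lambda acc, v: (acc[0] + v, acc[1] + 1)    # merge a value V into a U
    comb = lambda a, b: (a[0] + b[0], a[1] + b[1])   # merge two U's
    sums_counts = rdd.aggregateByKey((0, 0), seq, comb)
    means = sums_counts.mapValues(lambda p: p[0] / p[1]).collectAsMap()
    # means == {'a': 2.0, 'b': 1.0}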
apache/spark | python/pyspark/sql/functions.py | _create_binary_mathfunction | def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# For legacy reasons, the arguments here can be implicitly converted into floats,
# if they are not columns or strings.
if isinsta... | python | def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# For legacy reasons, the arguments here can be implicitly converted into floats,
# if they are not columns or strings.
if isinsta... | [
"def",
"_create_binary_mathfunction",
"(",
"name",
",",
"doc",
"=",
"\"\"",
")",
":",
"def",
"_",
"(",
"col1",
",",
"col2",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"# For legacy reasons, the arguments here can be implicitly converted into floa... | Create a binary mathfunction by name | [
"Create",
"a",
"binary",
"mathfunction",
"by",
"name"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L80-L104 | train | Create a binary math function by name | [
30522,
13366,
1035,
3443,
1035,
12441,
1035,
8785,
11263,
27989,
1006,
2171,
1010,
9986,
1027,
1000,
1000,
1007,
1024,
1000,
1000,
1000,
3443,
1037,
12441,
8785,
11263,
27989,
2011,
2171,
1000,
1000,
1000,
13366,
1035,
1006,
8902,
2487,
101... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/shuffle.py | ExternalSorter._get_path | def _get_path(self, n):
""" Choose one directory for spill by number n """
d = self.local_dirs[n % len(self.local_dirs)]
if not os.path.exists(d):
os.makedirs(d)
return os.path.join(d, str(n)) | python | def _get_path(self, n):
""" Choose one directory for spill by number n """
d = self.local_dirs[n % len(self.local_dirs)]
if not os.path.exists(d):
os.makedirs(d)
return os.path.join(d, str(n)) | [
"def",
"_get_path",
"(",
"self",
",",
"n",
")",
":",
"d",
"=",
"self",
".",
"local_dirs",
"[",
"n",
"%",
"len",
"(",
"self",
".",
"local_dirs",
")",
"]",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"d",
")",
":",
"os",
".",
"makedirs",
... | Choose one directory for spill by number n | [
"Choose",
"one",
"directory",
"for",
"spill",
"by",
"number",
"n"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L440-L445 | train | Choose one directory for spill by number n | [
30522,
13366,
1035,
2131,
1035,
4130,
1006,
2969,
1010,
1050,
1007,
1024,
1000,
1000,
1000,
5454,
2028,
14176,
2005,
14437,
2011,
2193,
1050,
1000,
1000,
1000,
1040,
1027,
2969,
1012,
2334,
1035,
16101,
2015,
1031,
30524,
1006,
1050,
1007,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/streaming.py | StreamingQueryManager.awaitAnyTermination | def awaitAnyTermination(self, timeout=None):
"""Wait until any of the queries on the associated SQLContext has terminated since the
creation of the context, or since :func:`resetTerminated()` was called. If any query was
terminated with an exception, then the exception will be thrown.
If... | python | def awaitAnyTermination(self, timeout=None):
"""Wait until any of the queries on the associated SQLContext has terminated since the
creation of the context, or since :func:`resetTerminated()` was called. If any query was
terminated with an exception, then the exception will be thrown.
If... | [
"def",
"awaitAnyTermination",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"timeout",
",",
"(",
"int",
",",
"float",
")",
")",
"or",
"timeout",
"<",
"0",
":",
"raise",
... | Wait until any of the queries on the associated SQLContext has terminated since the
creation of the context, or since :func:`resetTerminated()` was called. If any query was
terminated with an exception, then the exception will be thrown.
If `timeout` is set, it returns whether the query has term... | [
"Wait",
"until",
"any",
"of",
"the",
"queries",
"on",
"the",
"associated",
"SQLContext",
"has",
"terminated",
"since",
"the",
"creation",
"of",
"the",
"context",
"or",
"since",
":",
"func",
":",
"resetTerminated",
"()",
"was",
"called",
".",
"If",
"any",
"... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L240-L265 | train | Wait until any of the queries on the associated SQLContext has terminated or until the timeout is reached. | [
30522,
13366,
26751,
19092,
3334,
22311,
3508,
1006,
2969,
1010,
2051,
5833,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
3524,
2127,
2151,
1997,
1996,
10861,
5134,
2006,
1996,
3378,
29296,
8663,
18209,
2038,
12527,
2144,
1996,
4325,
1997,
199... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
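A sketch of waiting on the StreamingQueryManager, assuming a SparkSession named spark and a streaming DataFrame stream_df defined elsewhere (both are assumptions, not part of the record):

    query = stream_df.writeStream.format("memory").queryName("t").start()
    finished = spark.streams.awaitAnyTermination(timeout=60)  # True if some query terminated within 60s
    spark.streams.resetTerminated()  # clear past terminations before waiting again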
apache/spark | python/pyspark/sql/types.py | _make_type_verifier | def _make_type_verifier(dataType, nullable=True, name=None):
"""
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
This verifier also checks the value of obj against datatype and raises a ValueError if it's not
within the allowed range, e.g. u... | python | def _make_type_verifier(dataType, nullable=True, name=None):
"""
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
This verifier also checks the value of obj against datatype and raises a ValueError if it's not
within the allowed range, e.g. u... | [
"def",
"_make_type_verifier",
"(",
"dataType",
",",
"nullable",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"new_msg",
"=",
"lambda",
"msg",
":",
"msg",
"new_name",
"=",
"lambda",
"n",
":",
"\"field %s\"",
"%",
"n"... | Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
This verifier also checks the value of obj against datatype and raises a ValueError if it's not
within the allowed range, e.g. using 128 as ByteType will overflow. Note that, Python float is
not ch... | [
"Make",
"a",
"verifier",
"that",
"checks",
"the",
"type",
"of",
"obj",
"against",
"dataType",
"and",
"raises",
"a",
"TypeError",
"if",
"they",
"do",
"not",
"match",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1202-L1391 | train | Returns a verifier that checks the type of obj against dataType and raises a TypeError if they do not match. | [
30522,
13366,
1035,
2191,
1035,
2828,
1035,
2310,
3089,
8873,
2121,
1006,
2951,
13874,
1010,
19701,
3085,
1027,
2995,
1010,
2171,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
2191,
1037,
2310,
3089,
8873,
2121,
2008,
14148,
1996,
2828,
1997,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/rdd.py | RDD.countApproxDistinct | def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Ca... | python | def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Ca... | [
"def",
"countApproxDistinct",
"(",
"self",
",",
"relativeSD",
"=",
"0.05",
")",
":",
"if",
"relativeSD",
"<",
"0.000017",
":",
"raise",
"ValueError",
"(",
"\"relativeSD should be greater than 0.000017\"",
")",
"# the hash space in Java is 2^32",
"hashRDD",
"=",
"self",
... | .. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi... | [
"..",
"note",
"::",
"Experimental"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2350-L2376 | train | Return approximate number of distinct elements in the RDD. | [
30522,
13366,
4175,
29098,
3217,
2595,
10521,
7629,
6593,
1006,
2969,
1010,
9064,
2094,
1027,
1014,
1012,
5709,
1007,
1024,
1000,
1000,
1000,
1012,
1012,
3602,
1024,
1024,
6388,
2709,
15796,
2193,
1997,
5664,
3787,
1999,
1996,
16428,
2094,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
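A minimal usage sketch of countApproxDistinct, assuming a SparkContext named sc (mirrors the docstring example):

    n = sc.parallelize(range(1000)).map(str).countApproxDistinct(relativeSD=0.05)
    # n is approximately 1000, e.g. 900 < n < 1100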
apache/spark | python/pyspark/sql/readwriter.py | DataFrameReader.load | def load(self, path=None, format=None, schema=None, **options):
"""Loads data from a data source and returns it as a :class`DataFrame`.
:param path: optional string or a list of string for file-system backed data sources.
:param format: optional string for format of the data source. Default to ... | python | def load(self, path=None, format=None, schema=None, **options):
"""Loads data from a data source and returns it as a :class`DataFrame`.
:param path: optional string or a list of string for file-system backed data sources.
:param format: optional string for format of the data source. Default to ... | [
"def",
"load",
"(",
"self",
",",
"path",
"=",
"None",
",",
"format",
"=",
"None",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"if",
"format",
"is",
"not",
"None",
":",
"self",
".",
"format",
"(",
"format",
")",
"if",
"schema",... | Loads data from a data source and returns it as a :class`DataFrame`.
:param path: optional string or a list of string for file-system backed data sources.
:param format: optional string for format of the data source. Default to 'parquet'.
:param schema: optional :class:`pyspark.sql.types.Struct... | [
"Loads",
"data",
"from",
"a",
"data",
"source",
"and",
"returns",
"it",
"as",
"a",
":",
"class",
"DataFrame",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L141-L172 | train | Loads data from a file - system backed data source and returns it as a : class : DataFrame. | [
30522,
13366,
7170,
1006,
2969,
1010,
4130,
1027,
3904,
1010,
4289,
1027,
3904,
1010,
8040,
28433,
1027,
3904,
1010,
1008,
1008,
7047,
1007,
1024,
1000,
1000,
1000,
15665,
2951,
2013,
1037,
2951,
3120,
1998,
5651,
2009,
2004,
1037,
1024,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
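A minimal sketch of the generic load() entry point, assuming a running SparkSession named spark; the file names are illustrative:

    df_json = spark.read.load("people.json", format="json")
    df_csv = spark.read.load("people.csv", format="csv", sep=",", header=True, inferSchema=True)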
huggingface/pytorch-pretrained-BERT | examples/run_classifier.py | DataProcessor._read_tsv | def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0]... | python | def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0]... | [
"def",
"_read_tsv",
"(",
"cls",
",",
"input_file",
",",
"quotechar",
"=",
"None",
")",
":",
"with",
"open",
"(",
"input_file",
",",
"\"r\"",
",",
"encoding",
"=",
"\"utf-8\"",
")",
"as",
"f",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"f",
",",
... | Reads a tab separated value file. | [
"Reads",
"a",
"tab",
"separated",
"value",
"file",
"."
] | b832d5bb8a6dfc5965015b828e577677eace601e | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/examples/run_classifier.py#L93-L102 | train | Reads a tab separated value file. | [
30522,
13366,
1035,
3191,
1035,
24529,
2615,
1006,
18856,
2015,
1010,
7953,
1035,
5371,
1010,
14686,
7507,
2099,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
9631,
1037,
21628,
5459,
3643,
5371,
1012,
1000,
1000,
1000,
2007,
2330,
1006,
7953,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
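The same idea using only the standard library: read a tab-separated file into a list of rows (the file name is illustrative):

    import csv

    with open("train.tsv", "r", encoding="utf-8") as f:
        reader = csv.reader(f, delimiter="\t")
        lines = [line for line in reader]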
ageitgey/face_recognition | face_recognition/api.py | _rect_to_css | def _rect_to_css(rect):
"""
Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order
:param rect: a dlib 'rect' object
:return: a plain tuple representation of the rect in (top, right, bottom, left) order
"""
return rect.top(), rect.right(), rect.bottom(), rect.left() | python | def _rect_to_css(rect):
"""
Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order
:param rect: a dlib 'rect' object
:return: a plain tuple representation of the rect in (top, right, bottom, left) order
"""
return rect.top(), rect.right(), rect.bottom(), rect.left() | [
"def",
"_rect_to_css",
"(",
"rect",
")",
":",
"return",
"rect",
".",
"top",
"(",
")",
",",
"rect",
".",
"right",
"(",
")",
",",
"rect",
".",
"bottom",
"(",
")",
",",
"rect",
".",
"left",
"(",
")"
] | Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order
:param rect: a dlib 'rect' object
:return: a plain tuple representation of the rect in (top, right, bottom, left) order | [
"Convert",
"a",
"dlib",
"rect",
"object",
"to",
"a",
"plain",
"tuple",
"in",
"(",
"top",
"right",
"bottom",
"left",
")",
"order"
] | c96b010c02f15e8eeb0f71308c641179ac1f19bb | https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L32-L39 | train | Convert a dlib rect object to a plain tuple in ( top right bottom left ) order | [
30522,
13366,
1035,
28667,
2102,
1035,
2000,
1035,
20116,
2015,
1006,
28667,
2102,
1007,
1024,
1000,
1000,
1000,
10463,
1037,
21469,
12322,
1005,
28667,
2102,
1005,
4874,
2000,
1037,
5810,
10722,
10814,
1999,
1006,
2327,
1010,
2157,
1010,
3... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/functions.py | conv | def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._... | python | def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._... | [
"def",
"conv",
"(",
"col",
",",
"fromBase",
",",
"toBase",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"conv",
"(",
"_to_java_column",
"(",
"col",
")",
",",
"fromBa... | Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')] | [
"Convert",
"a",
"number",
"in",
"a",
"string",
"column",
"from",
"one",
"base",
"to",
"another",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L810-L819 | train | Convert a number in a string column from one base to another. | [
30522,
13366,
9530,
2615,
1006,
8902,
1010,
2013,
15058,
1010,
2000,
15058,
1007,
1024,
1000,
1000,
1000,
10463,
1037,
2193,
1999,
1037,
5164,
5930,
2013,
2028,
2918,
2000,
2178,
1012,
1028,
1028,
1028,
1040,
2546,
1027,
12125,
1012,
2580,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
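A minimal usage sketch of conv, assuming a running SparkSession named spark (mirrors the docstring example):

    from pyspark.sql.functions import conv

    df = spark.createDataFrame([("010101",)], ["n"])
    df.select(conv(df.n, 2, 16).alias("hex")).show()  # '010101' in base 2 is '15' in base 16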
pandas-dev/pandas | pandas/core/missing.py | mask_missing | def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
... | python | def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
... | [
"def",
"mask_missing",
"(",
"arr",
",",
"values_to_mask",
")",
":",
"dtype",
",",
"values_to_mask",
"=",
"infer_dtype_from_array",
"(",
"values_to_mask",
")",
"try",
":",
"values_to_mask",
"=",
"np",
".",
"array",
"(",
"values_to_mask",
",",
"dtype",
"=",
"dty... | Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True | [
"Return",
"a",
"masking",
"array",
"of",
"same",
"size",
"/",
"shape",
"as",
"arr",
"with",
"entries",
"equaling",
"any",
"member",
"of",
"values_to_mask",
"set",
"to",
"True"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L18-L66 | train | Return a masking array of same size / shape as arr
with entries equaling any member of values_to_mask set to True | [
30522,
13366,
7308,
1035,
4394,
1006,
12098,
2099,
1010,
5300,
1035,
2000,
1035,
7308,
1007,
1024,
1000,
1000,
1000,
2709,
1037,
7308,
2075,
9140,
1997,
2168,
2946,
1013,
4338,
2004,
12098,
2099,
2007,
10445,
5020,
2075,
2151,
2266,
1997,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
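Not the pandas internal itself, but a rough NumPy equivalent of the idea: build a boolean mask that is True wherever arr equals any of the values to mask, treating NaN separately because NaN never compares equal to itself:

    import numpy as np

    arr = np.array([1.0, 2.0, np.nan, 4.0])
    values_to_mask = [2.0, np.nan]
    non_nan = [v for v in values_to_mask if not (isinstance(v, float) and np.isnan(v))]
    mask = np.isin(arr, non_nan)
    if len(non_nan) != len(values_to_mask):  # NaN was among the values to mask
        mask |= np.isnan(arr)
    # mask == array([False, True, True, False])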
pandas-dev/pandas | pandas/core/indexes/base.py | Index.difference | def difference(self, other, sort=None):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default None
... | python | def difference(self, other, sort=None):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default None
... | [
"def",
"difference",
"(",
"self",
",",
"other",
",",
"sort",
"=",
"None",
")",
":",
"self",
".",
"_validate_sort_keyword",
"(",
"sort",
")",
"self",
".",
"_assert_can_do_setop",
"(",
"other",
")",
"if",
"self",
".",
"equals",
"(",
"other",
")",
":",
"#... | Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort the resulting index. By default, th... | [
"Return",
"a",
"new",
"Index",
"with",
"elements",
"from",
"the",
"index",
"that",
"are",
"not",
"in",
"other",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2441-L2504 | train | Return a new Index with elements from the index that are not in the other. | [
30522,
13366,
4489,
1006,
2969,
1010,
2060,
1010,
4066,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
2709,
1037,
2047,
5950,
2007,
3787,
2013,
1996,
5950,
2008,
2024,
2025,
1999,
1036,
2060,
1036,
1012,
2023,
2003,
1996,
2275,
4489,
1997,
20... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
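A minimal usage sketch of Index.difference:

    import pandas as pd

    idx1 = pd.Index([2, 1, 3, 4])
    idx2 = pd.Index([3, 4, 5, 6])
    idx1.difference(idx2)               # Index([1, 2]) -- sorted by default
    idx1.difference(idx2, sort=False)   # Index([2, 1]) -- keeps the calling Index's order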
apache/spark | python/pyspark/rdd.py | RDD.foldByKey | def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e... | python | def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e... | [
"def",
"foldByKey",
"(",
"self",
",",
"zeroValue",
",",
"func",
",",
"numPartitions",
"=",
"None",
",",
"partitionFunc",
"=",
"portable_hash",
")",
":",
"def",
"createZero",
"(",
")",
":",
"return",
"copy",
".",
"deepcopy",
"(",
"zeroValue",
")",
"return",... | Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a"... | [
"Merge",
"the",
"values",
"for",
"each",
"key",
"using",
"an",
"associative",
"function",
"func",
"and",
"a",
"neutral",
"zeroValue",
"which",
"may",
"be",
"added",
"to",
"the",
"result",
"an",
"arbitrary",
"number",
"of",
"times",
"and",
"must",
"not",
"c... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L1893-L1909 | train | Return a new table with the values for each key in the table grouped by func. | [
30522,
13366,
10671,
3762,
14839,
1006,
2969,
1010,
5717,
10175,
5657,
1010,
4569,
2278,
1010,
16371,
8737,
8445,
22753,
2015,
1027,
3904,
1010,
13571,
11263,
12273,
1027,
12109,
1035,
23325,
1007,
1024,
1000,
1000,
1000,
13590,
1996,
5300,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
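A minimal usage sketch of foldByKey, assuming a SparkContext named sc (mirrors the docstring example):

    from operator import add

    rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    sorted(rdd.foldByKey(0, add).collect())  # [('a', 2), ('b', 1)]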
apache/spark | python/pyspark/streaming/context.py | StreamingContext.binaryRecordsStream | def binaryRecordsStream(self, directory, recordLength):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from ... | python | def binaryRecordsStream(self, directory, recordLength):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from ... | [
"def",
"binaryRecordsStream",
"(",
"self",
",",
"directory",
",",
"recordLength",
")",
":",
"return",
"DStream",
"(",
"self",
".",
"_jssc",
".",
"binaryRecordsStream",
"(",
"directory",
",",
"recordLength",
")",
",",
"self",
",",
"NoOpSerializer",
"(",
")",
... | Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting wi... | [
"Create",
"an",
"input",
"stream",
"that",
"monitors",
"a",
"Hadoop",
"-",
"compatible",
"file",
"system",
"for",
"new",
"files",
"and",
"reads",
"them",
"as",
"flat",
"binary",
"files",
"with",
"records",
"of",
"fixed",
"length",
".",
"Files",
"must",
"be... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L265-L277 | train | Create an input stream that monitors a Hadoop - compatible file system
for new files and reads them as flat binary files with records of fixed length recordLength. | [
30522, 13366, 12441, 2890, 27108, 5104, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/ml/param/__init__.py | TypeConverters.toVector | def toVector(value):
"""
Convert a value to a MLlib Vector, if possible.
"""
if isinstance(value, Vector):
return value
elif TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._i... | python | def toVector(value):
"""
Convert a value to a MLlib Vector, if possible.
"""
if isinstance(value, Vector):
return value
elif TypeConverters._can_convert_to_list(value):
value = TypeConverters.toList(value)
if all(map(lambda v: TypeConverters._i... | [
"def",
"toVector",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Vector",
")",
":",
"return",
"value",
"elif",
"TypeConverters",
".",
"_can_convert_to_list",
"(",
"value",
")",
":",
"value",
"=",
"TypeConverters",
".",
"toList",
"(",
"val... | Convert a value to a MLlib Vector, if possible. | [
"Convert",
"a",
"value",
"to",
"a",
"MLlib",
"Vector",
"if",
"possible",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L160-L170 | train | Converts a value to a MLlib Vector if possible. | [
30522, 13366, 2000, 3726, 16761, 1006, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.persist | def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage l... | python | def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage l... | [
"def",
"persist",
"(",
"self",
",",
"storageLevel",
"=",
"StorageLevel",
".",
"MEMORY_AND_DISK",
")",
":",
"self",
".",
"is_cached",
"=",
"True",
"javaStorageLevel",
"=",
"self",
".",
"_sc",
".",
"_getJavaStorageLevel",
"(",
"storageLevel",
")",
"self",
".",
... | Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified defaults to (C{MEM... | [
"Sets",
"the",
"storage",
"level",
"to",
"persist",
"the",
"contents",
"of",
"the",
":",
"class",
":",
"DataFrame",
"across",
"operations",
"after",
"the",
"first",
"time",
"it",
"is",
"computed",
".",
"This",
"can",
"only",
"be",
"used",
"to",
"assign",
... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L592-L603 | train | Sets the storage level to persist the contents of the DataFrame across the first time it is computed. | [
30522, 13366, 29486, 1006, 2969, 1010, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
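A minimal, hypothetical usage sketch for the DataFrame.persist API documented in the record above; the local SparkSession setup, application name, and storage level are illustrative assumptions, not values taken from the dataset.

from pyspark.sql import SparkSession
from pyspark import StorageLevel

# hypothetical local session, for illustration only
spark = SparkSession.builder.master("local[2]").appName("persist-demo").getOrCreate()

df = spark.range(10)
df.persist(StorageLevel.MEMORY_ONLY)   # assign a storage level before the first action
print(df.count())                      # the first computation fills the cache
df.unpersist()
spark.stop()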
pandas-dev/pandas | pandas/core/indexes/base.py | Index._coerce_scalar_to_index | def _coerce_scalar_to_index(self, item):
"""
We need to coerce a scalar to a compat for our index type.
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to t... | python | def _coerce_scalar_to_index(self, item):
"""
We need to coerce a scalar to a compat for our index type.
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to t... | [
"def",
"_coerce_scalar_to_index",
"(",
"self",
",",
"item",
")",
":",
"dtype",
"=",
"self",
".",
"dtype",
"if",
"self",
".",
"_is_numeric_dtype",
"and",
"isna",
"(",
"item",
")",
":",
"# We can't coerce to the numeric dtype of \"self\" (unless",
"# it's float) if ther... | We need to coerce a scalar to a compat for our index type.
Parameters
----------
item : scalar item to coerce | [
"We",
"need",
"to",
"coerce",
"a",
"scalar",
"to",
"a",
"compat",
"for",
"our",
"index",
"type",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3823-L3838 | train | Coerce a scalar item to an index. | [
30522, 13366, 1035, 24873, 19170, 1035, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | sql/gen-sql-markdown.py | _make_pretty_deprecated | def _make_pretty_deprecated(deprecated):
"""
Makes the deprecated description pretty and returns a formatted string if `deprecated`
is not an empty string. Otherwise, returns None.
Expected input:
...
Expected output:
**Deprecated:**
...
"""
if deprecated != "":
... | python | def _make_pretty_deprecated(deprecated):
"""
Makes the deprecated description pretty and returns a formatted string if `deprecated`
is not an empty string. Otherwise, returns None.
Expected input:
...
Expected output:
**Deprecated:**
...
"""
if deprecated != "":
... | [
"def",
"_make_pretty_deprecated",
"(",
"deprecated",
")",
":",
"if",
"deprecated",
"!=",
"\"\"",
":",
"deprecated",
"=",
"\"\\n\"",
".",
"join",
"(",
"map",
"(",
"lambda",
"n",
":",
"n",
"[",
"4",
":",
"]",
",",
"deprecated",
".",
"split",
"(",
"\"\\n\... | Makes the deprecated description pretty and returns a formatted string if `deprecated`
is not an empty string. Otherwise, returns None.
Expected input:
...
Expected output:
**Deprecated:**
... | [
"Makes",
"the",
"deprecated",
"description",
"pretty",
"and",
"returns",
"a",
"formatted",
"string",
"if",
"deprecated",
"is",
"not",
"an",
"empty",
"string",
".",
"Otherwise",
"returns",
"None",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/sql/gen-sql-markdown.py#L140-L158 | train | Makes the deprecated description pretty and returns a formatted string. | [
30522, 30524, 1037, 4289, 3064, 5164, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/sql/functions.py | from_csv | def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
... | python | def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
... | [
"def",
"from_csv",
"(",
"col",
",",
"schema",
",",
"options",
"=",
"{",
"}",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"if",
"isinstance",
"(",
"schema",
",",
"basestring",
")",
":",
"schema",
"=",
"_create_column_from_literal",
"(",... | Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
:param options: options to control parsing. acc... | [
"Parses",
"a",
"column",
"containing",
"a",
"CSV",
"string",
"to",
"a",
"row",
"with",
"the",
"specified",
"schema",
".",
"Returns",
"null",
"in",
"the",
"case",
"of",
"an",
"unparseable",
"string",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L2762-L2789 | train | Parses a column containing a CSV string to a row with the specified schema. | [
30522, 13366, 2013, 1035, 20116, 2615, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
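A brief, hypothetical sketch of how the from_csv function in the record above can be called in recent Spark versions; the session setup, column name, and DDL schema string are assumptions made for illustration only.

from pyspark.sql import SparkSession
from pyspark.sql.functions import from_csv

spark = SparkSession.builder.master("local[2]").appName("from-csv-demo").getOrCreate()

df = spark.createDataFrame([("1,abc",)], ["value"])
# parse the CSV string column using a DDL-formatted schema
df.select(from_csv(df.value, "a INT, b STRING").alias("csv")).show(truncate=False)
spark.stop()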
pandas-dev/pandas | pandas/core/indexes/base.py | Index.summary | def summary(self, name=None):
"""
Return a summarized representation.
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name) | python | def summary(self, name=None):
"""
Return a summarized representation.
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name) | [
"def",
"summary",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"\"'summary' is deprecated and will be removed in a \"",
"\"future version.\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"self",
".",
"_summar... | Return a summarized representation.
.. deprecated:: 0.23.0 | [
"Return",
"a",
"summarized",
"representation",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1090-L1098 | train | Return a summarized representation of the current object. | [
30522, 13366, 12654, 1006, 2969, 1010, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
pandas-dev/pandas | pandas/core/window.py | Rolling._validate_monotonic | def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or 'index'
raise ValueError("{0} must be "
"monotonic".format(formatted)) | python | def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or 'index'
raise ValueError("{0} must be "
"monotonic".format(formatted)) | [
"def",
"_validate_monotonic",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_on",
".",
"is_monotonic",
":",
"formatted",
"=",
"self",
".",
"on",
"or",
"'index'",
"raise",
"ValueError",
"(",
"\"{0} must be \"",
"\"monotonic\"",
".",
"format",
"(",
"format... | Validate on is_monotonic. | [
"Validate",
"on",
"is_monotonic",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/window.py#L1602-L1609 | train | Validate on is_monotonic. | [
30522, 13366, 1035, 9398, 3686, 1035, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/context.py | SparkContext.runJob | def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.pa... | python | def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.pa... | [
"def",
"runJob",
"(",
"self",
",",
"rdd",
",",
"partitionFunc",
",",
"partitions",
"=",
"None",
",",
"allowLocal",
"=",
"False",
")",
":",
"if",
"partitions",
"is",
"None",
":",
"partitions",
"=",
"range",
"(",
"rdd",
".",
"_jrdd",
".",
"partitions",
"... | Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
... | [
"Executes",
"the",
"given",
"partitionFunc",
"on",
"the",
"specified",
"set",
"of",
"partitions",
"returning",
"the",
"result",
"as",
"an",
"array",
"of",
"elements",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L1052-L1075 | train | Runs the given partitionFunc on the specified set of partitions and returns the result as an array of elements. | [
30522, 13366, 2448, 5558, 2497, 1006, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
huggingface/pytorch-pretrained-BERT | pytorch_pretrained_bert/modeling_openai.py | OpenAIGPTModel.set_num_special_tokens | def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initiali... | python | def set_num_special_tokens(self, num_special_tokens):
" Update input embeddings with new embedding matrice if needed "
if self.config.n_special == num_special_tokens:
return
# Update config
self.config.n_special = num_special_tokens
# Build new embeddings and initiali... | [
"def",
"set_num_special_tokens",
"(",
"self",
",",
"num_special_tokens",
")",
":",
"if",
"self",
".",
"config",
".",
"n_special",
"==",
"num_special_tokens",
":",
"return",
"# Update config",
"self",
".",
"config",
".",
"n_special",
"=",
"num_special_tokens",
"# B... | Update input embeddings with new embedding matrice if needed | [
"Update",
"input",
"embeddings",
"with",
"new",
"embedding",
"matrice",
"if",
"needed"
] | b832d5bb8a6dfc5965015b828e577677eace601e | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/modeling_openai.py#L605-L617 | train | Update input embeddings with new embedding matrice if needed | [
30522, 13366, 2275, 1035, 16371, 2213, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
pandas-dev/pandas | pandas/core/indexes/base.py | Index.format | def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
... | python | def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
... | [
"def",
"format",
"(",
"self",
",",
"name",
"=",
"False",
",",
"formatter",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"header",
"=",
"[",
"]",
"if",
"name",
":",
"header",
".",
"append",
"(",
"pprint_thing",
"(",
"self",
".",
"name",
",",
"e... | Render a string representation of the Index. | [
"Render",
"a",
"string",
"representation",
"of",
"the",
"Index",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L981-L994 | train | Render a string representation of the Index. | [
30522, 13366, 4289, 1006, 2969, 1010, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/sql/session.py | SparkSession.catalog | def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalo... | python | def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalo... | [
"def",
"catalog",
"(",
"self",
")",
":",
"from",
"pyspark",
".",
"sql",
".",
"catalog",
"import",
"Catalog",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_catalog\"",
")",
":",
"self",
".",
"_catalog",
"=",
"Catalog",
"(",
"self",
")",
"return",
"self"... | Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog` | [
"Interface",
"through",
"which",
"the",
"user",
"may",
"create",
"drop",
"alter",
"or",
"query",
"underlying",
"databases",
"tables",
"functions",
"etc",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L311-L320 | train | Interface through which the user may create drop alter or query underlying
databases tables functions etc. | [
30522, 13366, 12105, 1006, 2969, 1007, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/mllib/linalg/__init__.py | DenseMatrix.toArray | def toArray(self):
"""
Return an numpy.ndarray
>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0., 2.],
[ 1., 3.]])
"""
if self.isTransposed:
return np.asfortranarray(
self.values.reshape((self.numRows, se... | python | def toArray(self):
"""
Return an numpy.ndarray
>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0., 2.],
[ 1., 3.]])
"""
if self.isTransposed:
return np.asfortranarray(
self.values.reshape((self.numRows, se... | [
"def",
"toArray",
"(",
"self",
")",
":",
"if",
"self",
".",
"isTransposed",
":",
"return",
"np",
".",
"asfortranarray",
"(",
"self",
".",
"values",
".",
"reshape",
"(",
"(",
"self",
".",
"numRows",
",",
"self",
".",
"numCols",
")",
")",
")",
"else",
... | Return an numpy.ndarray
>>> m = DenseMatrix(2, 2, range(4))
>>> m.toArray()
array([[ 0., 2.],
[ 1., 3.]]) | [
"Return",
"an",
"numpy",
".",
"ndarray"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/linalg/__init__.py#L1082-L1095 | train | Return an array of the ISO - 8601 related data. | [
30522, 13366, 2000, 2906, 9447, 1006, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
pandas-dev/pandas | pandas/core/indexes/base.py | Index.putmask | def putmask(self, mask, value):
"""
Return a new Index of the values set with the mask.
See Also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self.... | python | def putmask(self, mask, value):
"""
Return a new Index of the values set with the mask.
See Also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self.... | [
"def",
"putmask",
"(",
"self",
",",
"mask",
",",
"value",
")",
":",
"values",
"=",
"self",
".",
"values",
".",
"copy",
"(",
")",
"try",
":",
"np",
".",
"putmask",
"(",
"values",
",",
"mask",
",",
"self",
".",
"_convert_for_op",
"(",
"value",
")",
... | Return a new Index of the values set with the mask.
See Also
--------
numpy.ndarray.putmask | [
"Return",
"a",
"new",
"Index",
"of",
"the",
"values",
"set",
"with",
"the",
"mask",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4025-L4042 | train | Put a value into the Index of the values set with the mask. | [
30522, 13366, 2404, 9335, 2243, 1006, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._format_native_types | def _format_native_types(self, na_rep='', quoting=None, **kwargs):
"""
Actually format specific types of the index.
"""
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, d... | python | def _format_native_types(self, na_rep='', quoting=None, **kwargs):
"""
Actually format specific types of the index.
"""
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, d... | [
"def",
"_format_native_types",
"(",
"self",
",",
"na_rep",
"=",
"''",
",",
"quoting",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"mask",
"=",
"isna",
"(",
"self",
")",
"if",
"not",
"self",
".",
"is_object",
"(",
")",
"and",
"not",
"quoting",
"... | Actually format specific types of the index. | [
"Actually",
"format",
"specific",
"types",
"of",
"the",
"index",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1048-L1059 | train | Format the native types of the index. | [
30522, 13366, 1035, 4289, 1035, 3128, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/mllib/clustering.py | StreamingKMeans.setHalfLife | def setHalfLife(self, halfLife, timeUnit):
"""
Set number of batches after which the centroids of that
particular batch has half the weightage.
"""
self._timeUnit = timeUnit
self._decayFactor = exp(log(0.5) / halfLife)
return self | python | def setHalfLife(self, halfLife, timeUnit):
"""
Set number of batches after which the centroids of that
particular batch has half the weightage.
"""
self._timeUnit = timeUnit
self._decayFactor = exp(log(0.5) / halfLife)
return self | [
"def",
"setHalfLife",
"(",
"self",
",",
"halfLife",
",",
"timeUnit",
")",
":",
"self",
".",
"_timeUnit",
"=",
"timeUnit",
"self",
".",
"_decayFactor",
"=",
"exp",
"(",
"log",
"(",
"0.5",
")",
"/",
"halfLife",
")",
"return",
"self"
] | Set number of batches after which the centroids of that
particular batch has half the weightage. | [
"Set",
"number",
"of",
"batches",
"after",
"which",
"the",
"centroids",
"of",
"that",
"particular",
"batch",
"has",
"half",
"the",
"weightage",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/clustering.py#L838-L845 | train | Sets the number of batches after which the centroids of that set have half the weightage. | [
30522, 13366, 6662, 2389, 10258, 29323, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/rdd.py | RDD.foreachPartition | def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
... | python | def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
... | [
"def",
"foreachPartition",
"(",
"self",
",",
"f",
")",
":",
"def",
"func",
"(",
"it",
")",
":",
"r",
"=",
"f",
"(",
"it",
")",
"try",
":",
"return",
"iter",
"(",
"r",
")",
"except",
"TypeError",
":",
"return",
"iter",
"(",
"[",
"]",
")",
"self"... | Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f) | [
"Applies",
"a",
"function",
"to",
"each",
"partition",
"of",
"this",
"RDD",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L793-L808 | train | Applies a function to each partition of this RDD. | [
30522, 13366, 18921, 6776, 19362, 3775, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
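A minimal, hypothetical sketch of calling RDD.foreachPartition as documented in the record above; the local SparkContext, the partition count, and the helper function name are illustrative assumptions.

from pyspark import SparkContext

sc = SparkContext("local[2]", "foreachPartition-demo")

def show_partition(iterator):
    # runs on the executors; in a real cluster the printed output lands in worker logs
    for x in iterator:
        print(x)

sc.parallelize([1, 2, 3, 4, 5], 2).foreachPartition(show_partition)
sc.stop()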
huggingface/pytorch-pretrained-BERT | pytorch_pretrained_bert/tokenization.py | BasicTokenizer._run_strip_accents | def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
... | python | def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
... | [
"def",
"_run_strip_accents",
"(",
"self",
",",
"text",
")",
":",
"text",
"=",
"unicodedata",
".",
"normalize",
"(",
"\"NFD\"",
",",
"text",
")",
"output",
"=",
"[",
"]",
"for",
"char",
"in",
"text",
":",
"cat",
"=",
"unicodedata",
".",
"category",
"(",... | Strips accents from a piece of text. | [
"Strips",
"accents",
"from",
"a",
"piece",
"of",
"text",
"."
] | b832d5bb8a6dfc5965015b828e577677eace601e | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L236-L245 | train | Strips accents from a piece of text. | [
30522, 13366, 1035, 2448, 1035, 6167, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
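A small standalone sketch of the accent-stripping idea used by the tokenizer helper in the record above, written with only the standard library; the function name and sample string are hypothetical.

import unicodedata

def strip_accents(text):
    # NFD decomposition separates base characters from combining marks ("Mn"),
    # which are then dropped, the same idea as the helper shown in the record
    text = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in text if unicodedata.category(ch) != "Mn")

print(strip_accents("héllo wörld"))  # hello world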
apache/spark | python/pyspark/sql/catalog.py | Catalog.createExternalTable | def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If `... | python | def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If `... | [
"def",
"createExternalTable",
"(",
"self",
",",
"tableName",
",",
"path",
"=",
"None",
",",
"source",
"=",
"None",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"warnings",
".",
"warn",
"(",
"\"createExternalTable is deprecated since Spark 2.... | Creates a table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default... | [
"Creates",
"a",
"table",
"based",
"on",
"the",
"dataset",
"in",
"a",
"data",
"source",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/catalog.py#L142-L159 | train | Creates an external table based on the dataset in a data source. | [
30522, 13366, 3443, 10288, 16451, 2389, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/sql/types.py | _parse_datatype_string | def _parse_datatype_string(s):
"""
Parses the given data type string to a :class:`DataType`. The data type string format equals
to :class:`DataType.simpleString`, except that top level struct type can omit
the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
... | python | def _parse_datatype_string(s):
"""
Parses the given data type string to a :class:`DataType`. The data type string format equals
to :class:`DataType.simpleString`, except that top level struct type can omit
the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
... | [
"def",
"_parse_datatype_string",
"(",
"s",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"def",
"from_ddl_schema",
"(",
"type_str",
")",
":",
"return",
"_parse_datatype_json_string",
"(",
"sc",
".",
"_jvm",
".",
"org",
".",
"apache",
".",
... | Parses the given data type string to a :class:`DataType`. The data type string format equals
to :class:`DataType.simpleString`, except that top level struct type can omit
the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
of ``tinyint`` for :class:`ByteType`. We ... | [
"Parses",
"the",
"given",
"data",
"type",
"string",
"to",
"a",
":",
"class",
":",
"DataType",
".",
"The",
"data",
"type",
"string",
"format",
"equals",
"to",
":",
"class",
":",
"DataType",
".",
"simpleString",
"except",
"that",
"top",
"level",
"struct",
... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L758-L820 | train | Parses a string into a base - level structure type. | [
30522, 13366, 1035, 11968, 3366, 1035, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/sql/types.py | _check_series_convert_timestamps_localize | def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the tim... | python | def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the tim... | [
"def",
"_check_series_convert_timestamps_localize",
"(",
"s",
",",
"from_timezone",
",",
"to_timezone",
")",
":",
"from",
"pyspark",
".",
"sql",
".",
"utils",
"import",
"require_minimum_pandas_version",
"require_minimum_pandas_version",
"(",
")",
"import",
"pandas",
"as... | Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it... | [
"Convert",
"timestamp",
"to",
"timezone",
"-",
"naive",
"in",
"the",
"specified",
"timezone",
"or",
"local",
"timezone"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1792-L1817 | train | Convert timestamp to timezone - naive in the specified timezone or local timezone. | [
30522, 13366, 1035, 4638, 1035, 2186, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
pandas-dev/pandas | pandas/io/formats/css.py | CSSResolver.parse | def parse(self, declarations_str):
"""Generates (prop, value) pairs from declarations
In a future version may generate parsed tokens from tinycss/tinycss2
"""
for decl in declarations_str.split(';'):
if not decl.strip():
continue
prop, sep, val = ... | python | def parse(self, declarations_str):
"""Generates (prop, value) pairs from declarations
In a future version may generate parsed tokens from tinycss/tinycss2
"""
for decl in declarations_str.split(';'):
if not decl.strip():
continue
prop, sep, val = ... | [
"def",
"parse",
"(",
"self",
",",
"declarations_str",
")",
":",
"for",
"decl",
"in",
"declarations_str",
".",
"split",
"(",
"';'",
")",
":",
"if",
"not",
"decl",
".",
"strip",
"(",
")",
":",
"continue",
"prop",
",",
"sep",
",",
"val",
"=",
"decl",
... | Generates (prop, value) pairs from declarations
In a future version may generate parsed tokens from tinycss/tinycss2 | [
"Generates",
"(",
"prop",
"value",
")",
"pairs",
"from",
"declarations"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/css.py#L231-L247 | train | Parses a string of CSS attribute names into a list of property value pairs. | [
30522, 13366, 11968, 3366, 1006, 2969, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
pandas-dev/pandas | pandas/core/groupby/groupby.py | GroupBy.rolling | def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs) | python | def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling functionality per group.
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs) | [
"def",
"rolling",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"core",
".",
"window",
"import",
"RollingGroupby",
"return",
"RollingGroupby",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Return a rolling grouper, providing rolling functionality per group. | [
"Return",
"a",
"rolling",
"grouper",
"providing",
"rolling",
"functionality",
"per",
"group",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1457-L1462 | train | Return a rolling grouper providing rolling functionality per group. | [
30522, 13366, 5291, 1006, 2969, 1010, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/heapq3.py | heapify | def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2... | python | def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2... | [
"def",
"heapify",
"(",
"x",
")",
":",
"n",
"=",
"len",
"(",
"x",
")",
"# Transform bottom-up. The largest index there's any point to looking at",
"# is the largest with a child index in-range, so must have 2*i + 1 < n,",
"# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 ... | Transform list into a heap, in-place, in O(len(x)) time. | [
"Transform",
"list",
"into",
"a",
"heap",
"in",
"-",
"place",
"in",
"O",
"(",
"len",
"(",
"x",
"))",
"time",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/heapq3.py#L449-L458 | train | Transform list into a heap in - place in O ( len ( x ) time. | [
30522, 13366, 16721, 8757, 1006, 1060, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
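An illustrative sketch of the heapify behavior described in the record above, using the standard-library heapq module as a stand-in for the vendored pyspark.heapq3 copy; the sample list is a hypothetical value.

import heapq

x = [5, 1, 4, 2, 3]
heapq.heapify(x)            # in-place transform, O(len(x))
print(x[0])                 # the smallest element now sits at index 0 -> 1
print(heapq.heappop(x))     # pops the smallest element -> 1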
apache/spark | python/pyspark/traceback_utils.py | first_spark_call | def first_spark_call():
"""
Return a CallSite representing the first Spark call in the current call stack.
"""
tb = traceback.extract_stack()
if len(tb) == 0:
return None
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
... | python | def first_spark_call():
"""
Return a CallSite representing the first Spark call in the current call stack.
"""
tb = traceback.extract_stack()
if len(tb) == 0:
return None
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
... | [
"def",
"first_spark_call",
"(",
")",
":",
"tb",
"=",
"traceback",
".",
"extract_stack",
"(",
")",
"if",
"len",
"(",
"tb",
")",
"==",
"0",
":",
"return",
"None",
"file",
",",
"line",
",",
"module",
",",
"what",
"=",
"tb",
"[",
"len",
"(",
"tb",
")... | Return a CallSite representing the first Spark call in the current call stack. | [
"Return",
"a",
"CallSite",
"representing",
"the",
"first",
"Spark",
"call",
"in",
"the",
"current",
"call",
"stack",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/traceback_utils.py#L26-L46 | train | Return a CallSite representing the first Spark call in the current call stack. | [
30522, 13366, 2034, 1035, 12125, 1035, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
pandas-dev/pandas | pandas/core/panel.py | panel_index | def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame.
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List ... | python | def panel_index(time, panels, names=None):
"""
Returns a multi-index suitable for a panel-like DataFrame.
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List ... | [
"def",
"panel_index",
"(",
"time",
",",
"panels",
",",
"names",
"=",
"None",
")",
":",
"if",
"names",
"is",
"None",
":",
"names",
"=",
"[",
"'time'",
",",
"'panel'",
"]",
"time",
",",
"panels",
"=",
"_ensure_like_indices",
"(",
"time",
",",
"panels",
... | Returns a multi-index suitable for a panel-like DataFrame.
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
--... | [
"Returns",
"a",
"multi",
"-",
"index",
"suitable",
"for",
"a",
"panel",
"-",
"like",
"DataFrame",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L60-L101 | train | Returns a MultiIndex suitable for a panel - like DataFrame. | [
30522, 13366, 5997, 1035, 5950, 1006, ... (input_ids) | [ 0, 0, 0, ... (token_type_ids) | [ 1, 1, 1, ... (attention_mask) | [ -100, -100, -100, ... (labels) |
apache/spark | python/pyspark/sql/column.py | Column.substr | def substr(self, startPos, length):
"""
Return a :class:`Column` which is a substring of the column.
:param startPos: start position (int or Column)
:param length: length of the substring (int or Column)
>>> df.select(df.name.substr(1, 3).alias("col")).collect()
[Row(c... | python | def substr(self, startPos, length):
"""
Return a :class:`Column` which is a substring of the column.
:param startPos: start position (int or Column)
:param length: length of the substring (int or Column)
>>> df.select(df.name.substr(1, 3).alias("col")).collect()
[Row(c... | [
"def",
"substr",
"(",
"self",
",",
"startPos",
",",
"length",
")",
":",
"if",
"type",
"(",
"startPos",
")",
"!=",
"type",
"(",
"length",
")",
":",
"raise",
"TypeError",
"(",
"\"startPos and length must be the same type. \"",
"\"Got {startPos_t} and {length_t}, respe... | Return a :class:`Column` which is a substring of the column.
:param startPos: start position (int or Column)
:param length: length of the substring (int or Column)
>>> df.select(df.name.substr(1, 3).alias("col")).collect()
[Row(col=u'Ali'), Row(col=u'Bob')] | [
"Return",
"a",
":",
"class",
":",
"Column",
"which",
"is",
"a",
"substring",
"of",
"the",
"column",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/column.py#L403-L427 | train | Return a Column which is a substring of the column. | [
30522,
13366,
4942,
3367,
2099,
1006,
2969,
1010,
2707,
6873,
2015,
1010,
3091,
1007,
1024,
1000,
1000,
1000,
2709,
1037,
1024,
2465,
1024,
1036,
5930,
1036,
2029,
2003,
1037,
4942,
3367,
4892,
1997,
1996,
5930,
1012,
1024,
11498,
2213,
2... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
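A minimal usage sketch for the Column.substr expression documented in the record above; the local SparkSession and the two sample rows are illustrative assumptions, not part of the record:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").appName("substr-sketch").getOrCreate()
    df = spark.createDataFrame([("Alice", 2), ("Bob", 5)], ["name", "age"])

    # substr is 1-based: take three characters starting at position 1.
    df.select(df.name.substr(1, 3).alias("col")).show()
    # expected values: Ali, Bob

    spark.stop()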
apache/spark | python/pyspark/sql/dataframe.py | DataFrame.repartition | def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
... | python | def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
... | [
"def",
"repartition",
"(",
"self",
",",
"numPartitions",
",",
"*",
"cols",
")",
":",
"if",
"isinstance",
"(",
"numPartitions",
",",
"int",
")",
":",
"if",
"len",
"(",
"cols",
")",
"==",
"0",
":",
"return",
"DataFrame",
"(",
"self",
".",
"_jdf",
".",
... | Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioni... | [
"Returns",
"a",
"new",
":",
"class",
":",
"DataFrame",
"partitioned",
"by",
"the",
"given",
"partitioning",
"expressions",
".",
"The",
"resulting",
"DataFrame",
"is",
"hash",
"partitioned",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L662-L721 | train | Returns a new DataFrame hash-partitioned by the given columns into the given number of partitions. | [
30522,
13366,
16360,
8445,
22753,
1006,
2969,
1010,
16371,
8737,
8445,
22753,
2015,
1010,
1008,
8902,
2015,
1007,
1024,
1000,
1000,
1000,
5651,
1037,
2047,
1024,
2465,
1024,
1036,
2951,
15643,
1036,
13571,
2098,
2011,
1996,
2445,
13571,
207... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
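A short sketch of the DataFrame.repartition call described above; the sample frame, column name, and partition count are assumptions chosen only for illustration:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").appName("repartition-sketch").getOrCreate()
    df = spark.range(0, 1000).withColumnRenamed("id", "age")

    # Hash-partition by the "age" column into 10 partitions.
    repartitioned = df.repartition(10, "age")
    print(repartitioned.rdd.getNumPartitions())  # 10

    spark.stop()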
huggingface/pytorch-pretrained-BERT | pytorch_pretrained_bert/tokenization.py | whitespace_tokenize | def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens | python | def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens | [
"def",
"whitespace_tokenize",
"(",
"text",
")",
":",
"text",
"=",
"text",
".",
"strip",
"(",
")",
"if",
"not",
"text",
":",
"return",
"[",
"]",
"tokens",
"=",
"text",
".",
"split",
"(",
")",
"return",
"tokens"
] | Runs basic whitespace cleaning and splitting on a piece of text. | [
"Runs",
"basic",
"whitespace",
"cleaning",
"and",
"splitting",
"on",
"a",
"piece",
"of",
"text",
"."
] | b832d5bb8a6dfc5965015b828e577677eace601e | https://github.com/huggingface/pytorch-pretrained-BERT/blob/b832d5bb8a6dfc5965015b828e577677eace601e/pytorch_pretrained_bert/tokenization.py#L65-L71 | train | Runs basic whitespace cleaning and splitting on a piece of text. | [
30522,
13366,
12461,
15327,
1035,
19204,
4697,
1006,
3793,
1007,
1024,
1000,
1000,
1000,
3216,
3937,
12461,
15327,
9344,
1998,
14541,
2006,
1037,
3538,
1997,
3793,
1012,
1000,
1000,
1000,
3793,
1027,
3793,
1012,
6167,
1006,
1007,
2065,
2025... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
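The whitespace_tokenize function in the record above is self-contained, so a direct call illustrates its behavior; the sample string is an assumption:

    def whitespace_tokenize(text):
        """Runs basic whitespace cleaning and splitting on a piece of text."""
        text = text.strip()
        if not text:
            return []
        return text.split()

    print(whitespace_tokenize("  hello   BERT\tworld \n"))  # ['hello', 'BERT', 'world']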
pandas-dev/pandas | pandas/core/groupby/grouper.py | _get_grouper | def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
observed=False, mutated=False, validate=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multip... | python | def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
observed=False, mutated=False, validate=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multip... | [
"def",
"_get_grouper",
"(",
"obj",
",",
"key",
"=",
"None",
",",
"axis",
"=",
"0",
",",
"level",
"=",
"None",
",",
"sort",
"=",
"True",
",",
"observed",
"=",
"False",
",",
"mutated",
"=",
"False",
",",
"validate",
"=",
"True",
")",
":",
"group_axis... | create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers... | [
"create",
"and",
"return",
"a",
"BaseGrouper",
"which",
"is",
"an",
"internal",
"mapping",
"of",
"how",
"to",
"create",
"the",
"grouper",
"indexers",
".",
"This",
"may",
"be",
"composed",
"of",
"multiple",
"Grouping",
"objects",
"indicating",
"multiple",
"grou... | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/grouper.py#L406-L612 | train | Internal helper function to create a Grouper for a given object. | [
30522,
13366,
1035,
2131,
1035,
2177,
2121,
1006,
27885,
3501,
1010,
3145,
1027,
3904,
1010,
8123,
1027,
1014,
1010,
2504,
1027,
3904,
1010,
4066,
1027,
2995,
1010,
5159,
1027,
6270,
1010,
14163,
16238,
1027,
6270,
1010,
9398,
3686,
1027,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/groupby/ops.py | BinGrouper.get_iterator | def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start, edge: data._slice(
slice... | python | def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start, edge: data._slice(
slice... | [
"def",
"get_iterator",
"(",
"self",
",",
"data",
",",
"axis",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"NDFrame",
")",
":",
"slicer",
"=",
"lambda",
"start",
",",
"edge",
":",
"data",
".",
"_slice",
"(",
"slice",
"(",
"start",
",",... | Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group | [
"Groupby",
"iterator"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L711-L735 | train | Groupby iterator yielding a (name, subsetted object) pair for each group along the specified axis. | [
30522,
13366,
2131,
1035,
2009,
6906,
4263,
1006,
2969,
1010,
2951,
1010,
30524,
2005,
2169,
2177,
1000,
1000,
1000,
2065,
2003,
7076,
26897,
1006,
2951,
1010,
1050,
20952,
6444,
2063,
1007,
1024,
14704,
2099,
1027,
23375,
2707,
1010,
3341,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/missing.py | _akima_interpolate | def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted lis... | python | def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted lis... | [
"def",
"_akima_interpolate",
"(",
"xi",
",",
"yi",
",",
"x",
",",
"der",
"=",
"0",
",",
"axis",
"=",
"0",
")",
":",
"from",
"scipy",
"import",
"interpolate",
"try",
":",
"P",
"=",
"interpolate",
".",
"Akima1DInterpolator",
"(",
"xi",
",",
"yi",
",",
... | Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
... | [
"Convenience",
"function",
"for",
"akima",
"interpolation",
".",
"xi",
"and",
"yi",
"are",
"arrays",
"of",
"values",
"used",
"to",
"approximate",
"some",
"function",
"f",
"with",
"yi",
"=",
"f",
"(",
"xi",
")",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L358-L405 | train | Convenience wrapper for Akima 1-D interpolation of yi = f(xi) evaluated at the points x. | [
30522,
13366,
1035,
17712,
9581,
1035,
6970,
18155,
3686,
1006,
8418,
1010,
12316,
1010,
1060,
1010,
4315,
1027,
1014,
1010,
8123,
1027,
1014,
1007,
1024,
1000,
1000,
1000,
15106,
3853,
2005,
17712,
9581,
6970,
18155,
3370,
1012,
8418,
1998... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
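_akima_interpolate is an internal pandas helper; user code normally reaches it through Series.interpolate(method="akima"), which requires scipy to be installed. A hedged sketch of that public path, with made-up sample values:

    import numpy as np
    import pandas as pd

    # Fill the missing points with Akima (1-D) interpolation; delegates to the helper above.
    s = pd.Series([0.0, np.nan, np.nan, 3.0, np.nan, 5.0])
    print(s.interpolate(method="akima"))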
apache/spark | python/pyspark/sql/functions.py | ntile | def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is... | python | def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is... | [
"def",
"ntile",
"(",
"n",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"ntile",
"(",
"int",
"(",
"n",
")",
")",
")"
] | Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE f... | [
"Window",
"function",
":",
"returns",
"the",
"ntile",
"group",
"id",
"(",
"from",
"1",
"to",
"n",
"inclusive",
")",
"in",
"an",
"ordered",
"window",
"partition",
".",
"For",
"example",
"if",
"n",
"is",
"4",
"the",
"first",
"quarter",
"of",
"the",
"rows... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L872-L884 | train | This function returns the n - th ntile group id in an ordered window partition. | [
30522,
13366,
23961,
9463,
1006,
1050,
1007,
1024,
1000,
1000,
30524,
2003,
1018,
1010,
1996,
2034,
4284,
1997,
1996,
10281,
2097,
2131,
3643,
1015,
1010,
1996,
2117,
4284,
2097,
2131,
1016,
1010,
1996,
2353,
4284,
2097,
2131,
1017,
1010,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
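A usage sketch for the ntile window function described above; the sample data and bucket count are assumptions for illustration only:

    from pyspark.sql import SparkSession, Window, functions as F

    spark = SparkSession.builder.master("local[1]").appName("ntile-sketch").getOrCreate()
    df = spark.createDataFrame([(i,) for i in range(8)], ["value"])

    # Split the ordered rows into 4 roughly equal buckets, numbered 1..4.
    w = Window.orderBy("value")
    df.withColumn("bucket", F.ntile(4).over(w)).show()

    spark.stop()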
pandas-dev/pandas | pandas/core/dtypes/inference.py | is_dict_like | def is_dict_like(obj):
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, ... | python | def is_dict_like(obj):
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, ... | [
"def",
"is_dict_like",
"(",
"obj",
")",
":",
"dict_like_attrs",
"=",
"(",
"\"__getitem__\"",
",",
"\"keys\"",
",",
"\"__contains__\"",
")",
"return",
"(",
"all",
"(",
"hasattr",
"(",
"obj",
",",
"attr",
")",
"for",
"attr",
"in",
"dict_like_attrs",
")",
"# ... | Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(... | [
"Check",
"if",
"the",
"object",
"is",
"dict",
"-",
"like",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/inference.py#L373-L400 | train | Checks if the object is dict-like. | [
30522,
13366,
2003,
1035,
4487,
6593,
1035,
2066,
1006,
27885,
3501,
1007,
1024,
1000,
1000,
1000,
4638,
2065,
1996,
4874,
2003,
4487,
6593,
1011,
2066,
1012,
11709,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
1011,
27885,
3501,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
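The public entry point for this check is pandas.api.types.is_dict_like; a short sketch with arbitrary sample objects:

    from pandas.api.types import is_dict_like

    print(is_dict_like({1: 2}))     # True
    print(is_dict_like([1, 2, 3]))  # False
    print(is_dict_like(dict))       # False - the class itself is excluded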
apache/spark | python/pyspark/sql/functions.py | locate | def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :cla... | python | def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :cla... | [
"def",
"locate",
"(",
"substr",
",",
"str",
",",
"pos",
"=",
"1",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"return",
"Column",
"(",
"sc",
".",
"_jvm",
".",
"functions",
".",
"locate",
"(",
"substr",
",",
"_to_java_column",
"(",
... | Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
:param... | [
"Locate",
"the",
"position",
"of",
"the",
"first",
"occurrence",
"of",
"substr",
"in",
"a",
"string",
"column",
"after",
"position",
"pos",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/functions.py#L1647-L1663 | train | Locate the first occurrence of substr in a string column after position pos. Returns 0 if substr could not be found in str. | [
30522,
13366,
12453,
1006,
4942,
3367,
2099,
1010,
2358,
2099,
1010,
13433,
2015,
1027,
1015,
1007,
1024,
1000,
1000,
1000,
12453,
1996,
2597,
1997,
1996,
2034,
14404,
1997,
4942,
3367,
2099,
1999,
1037,
5164,
5930,
1010,
2044,
2597,
13433,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
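A minimal sketch of the locate function from the record above; the single-row sample frame is an assumption:

    from pyspark.sql import SparkSession, functions as F

    spark = SparkSession.builder.master("local[1]").appName("locate-sketch").getOrCreate()
    df = spark.createDataFrame([("abcd",)], ["s"])

    # 1-based position of the first 'b'; locate returns 0 when the substring is absent.
    df.select(F.locate("b", df.s, 1).alias("pos")).show()  # pos = 2

    spark.stop()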
apache/spark | python/pyspark/rdd.py | RDD.takeSample | def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(... | python | def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(... | [
"def",
"takeSample",
"(",
"self",
",",
"withReplacement",
",",
"num",
",",
"seed",
"=",
"None",
")",
":",
"numStDev",
"=",
"10.0",
"if",
"num",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Sample size cannot be negative.\"",
")",
"elif",
"num",
"==",
"0",... | Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
... | [
"Return",
"a",
"fixed",
"-",
"size",
"sampled",
"subset",
"of",
"this",
"RDD",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L465-L518 | train | Return a fixed-size sampled subset of this RDD. | [
30522,
13366,
3138,
16613,
2571,
1006,
2969,
1010,
2007,
2890,
24759,
10732,
3672,
1010,
16371,
2213,
1010,
6534,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
2709,
1037,
4964,
1011,
2946,
18925,
16745,
1997,
2023,
16428,
2094,
1012,
1012,
101... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
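A sketch of RDD.takeSample based on the docstring example above; the local SparkContext setup and seeds are assumptions:

    from pyspark import SparkContext

    sc = SparkContext("local[1]", "takeSample-sketch")
    rdd = sc.parallelize(range(0, 10))

    # With replacement the sample may be larger than the RDD itself.
    print(len(rdd.takeSample(True, 20, 1)))      # 20
    # Without replacement the sample is capped at the RDD size.
    print(sorted(rdd.takeSample(False, 5, 3)))   # 5 distinct elements

    sc.stop()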
apache/spark | python/pyspark/context.py | SparkContext.setSystemProperty | def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
must be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value) | python | def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
must be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value) | [
"def",
"setSystemProperty",
"(",
"cls",
",",
"key",
",",
"value",
")",
":",
"SparkContext",
".",
"_ensure_initialized",
"(",
")",
"SparkContext",
".",
"_jvm",
".",
"java",
".",
"lang",
".",
"System",
".",
"setProperty",
"(",
"key",
",",
"value",
")"
] | Set a Java system property, such as spark.executor.memory. This must
must be invoked before instantiating SparkContext. | [
"Set",
"a",
"Java",
"system",
"property",
"such",
"as",
"spark",
".",
"executor",
".",
"memory",
".",
"This",
"must",
"must",
"be",
"invoked",
"before",
"instantiating",
"SparkContext",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L372-L378 | train | Sets a Java system property such as spark. executor. memory. | [
30522,
13366,
4520,
27268,
6633,
21572,
4842,
3723,
1006,
18856,
2015,
1010,
3145,
1010,
3643,
1007,
1024,
1000,
1000,
1000,
2275,
1037,
9262,
2291,
3200,
1010,
2107,
2004,
12125,
1012,
4654,
8586,
16161,
2099,
1012,
3638,
1012,
2023,
2442,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
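A minimal sketch of SparkContext.setSystemProperty; the property value and app name are assumptions, and the call only has an effect when made before the context is created, as the docstring above states:

    from pyspark import SparkContext

    # The system property must be set before the SparkContext is instantiated.
    SparkContext.setSystemProperty("spark.executor.memory", "2g")
    sc = SparkContext("local[1]", "set-system-property-sketch")
    # ... run jobs with the configured executor memory ...
    sc.stop()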
apache/spark | python/pyspark/ml/param/__init__.py | Params.copy | def copy(self, extra=None):
"""
Creates a copy of this instance with the same uid and some
extra params. The default implementation creates a
shallow copy using :py:func:`copy.copy`, and then copies the
embedded and extra parameters over and returns the copy.
Subclasses s... | python | def copy(self, extra=None):
"""
Creates a copy of this instance with the same uid and some
extra params. The default implementation creates a
shallow copy using :py:func:`copy.copy`, and then copies the
embedded and extra parameters over and returns the copy.
Subclasses s... | [
"def",
"copy",
"(",
"self",
",",
"extra",
"=",
"None",
")",
":",
"if",
"extra",
"is",
"None",
":",
"extra",
"=",
"dict",
"(",
")",
"that",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"that",
".",
"_paramMap",
"=",
"{",
"}",
"that",
".",
"_defau... | Creates a copy of this instance with the same uid and some
extra params. The default implementation creates a
shallow copy using :py:func:`copy.copy`, and then copies the
embedded and extra parameters over and returns the copy.
Subclasses should override this method if the default approa... | [
"Creates",
"a",
"copy",
"of",
"this",
"instance",
"with",
"the",
"same",
"uid",
"and",
"some",
"extra",
"params",
".",
"The",
"default",
"implementation",
"creates",
"a",
"shallow",
"copy",
"using",
":",
"py",
":",
"func",
":",
"copy",
".",
"copy",
"and"... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L368-L385 | train | Creates a shallow copy of this instance with the same uid and extra params. | [
30522,
13366,
6100,
1006,
2969,
1010,
4469,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
9005,
1037,
6100,
1997,
2023,
6013,
2007,
1996,
2168,
21318,
2094,
1998,
2070,
4469,
11498,
5244,
1012,
1996,
12398,
7375,
9005,
1037,
8467,
6100,
2478,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/taskcontext.py | BarrierTaskContext.getTaskInfos | def getTaskInfos(self):
"""
.. note:: Experimental
Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
ordered by partition ID.
.. versionadded:: 2.4.0
"""
if self._port is None or self._secret is None:
raise Exception("Not supporte... | python | def getTaskInfos(self):
"""
.. note:: Experimental
Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
ordered by partition ID.
.. versionadded:: 2.4.0
"""
if self._port is None or self._secret is None:
raise Exception("Not supporte... | [
"def",
"getTaskInfos",
"(",
"self",
")",
":",
"if",
"self",
".",
"_port",
"is",
"None",
"or",
"self",
".",
"_secret",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Not supported to call getTaskInfos() before initialize \"",
"+",
"\"BarrierTaskContext.\"",
")",
... | .. note:: Experimental
Returns :class:`BarrierTaskInfo` for all tasks in this barrier stage,
ordered by partition ID.
.. versionadded:: 2.4.0 | [
"..",
"note",
"::",
"Experimental"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/taskcontext.py#L191-L205 | train | Returns a list of BarrierTaskInfo objects for all tasks in this barrier stage ordered by partition ID. | [
30522,
13366,
2131,
10230,
4939,
14876,
2015,
1006,
2969,
1007,
1024,
1000,
1000,
1000,
1012,
1012,
3602,
1024,
1024,
6388,
5651,
1024,
2465,
1024,
1036,
8803,
10230,
4939,
14876,
1036,
2005,
2035,
8518,
1999,
2023,
8803,
2754,
1010,
3641,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/shuffle.py | ExternalMerger.mergeValues | def mergeValues(self, iterator):
""" Combine the items by creator and combiner """
# speedup attribute lookup
creator, comb = self.agg.createCombiner, self.agg.mergeValue
c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch
limit = self.memory_limit... | python | def mergeValues(self, iterator):
""" Combine the items by creator and combiner """
# speedup attribute lookup
creator, comb = self.agg.createCombiner, self.agg.mergeValue
c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch
limit = self.memory_limit... | [
"def",
"mergeValues",
"(",
"self",
",",
"iterator",
")",
":",
"# speedup attribute lookup",
"creator",
",",
"comb",
"=",
"self",
".",
"agg",
".",
"createCombiner",
",",
"self",
".",
"agg",
".",
"mergeValue",
"c",
",",
"data",
",",
"pdata",
",",
"hfun",
"... | Combine the items by creator and combiner | [
"Combine",
"the",
"items",
"by",
"creator",
"and",
"combiner"
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L231-L253 | train | Combine the items by creator and combiner | [
30522,
13366,
13590,
10175,
15808,
1006,
2969,
1010,
2009,
6906,
4263,
1007,
1024,
1000,
1000,
1000,
11506,
1996,
5167,
2011,
8543,
1998,
11506,
2099,
1000,
1000,
1000,
1001,
3177,
6279,
17961,
2298,
6279,
8543,
1010,
22863,
1027,
2969,
101... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/find_spark_home.py | _find_spark_home | def _find_spark_home():
"""Find the SPARK_HOME."""
# If the environment has SPARK_HOME set trust it.
if "SPARK_HOME" in os.environ:
return os.environ["SPARK_HOME"]
def is_spark_home(path):
"""Takes a path and returns true if the provided path could be a reasonable SPARK_HOME"""
... | python | def _find_spark_home():
"""Find the SPARK_HOME."""
# If the environment has SPARK_HOME set trust it.
if "SPARK_HOME" in os.environ:
return os.environ["SPARK_HOME"]
def is_spark_home(path):
"""Takes a path and returns true if the provided path could be a reasonable SPARK_HOME"""
... | [
"def",
"_find_spark_home",
"(",
")",
":",
"# If the environment has SPARK_HOME set trust it.",
"if",
"\"SPARK_HOME\"",
"in",
"os",
".",
"environ",
":",
"return",
"os",
".",
"environ",
"[",
"\"SPARK_HOME\"",
"]",
"def",
"is_spark_home",
"(",
"path",
")",
":",
"\"\"... | Find the SPARK_HOME. | [
"Find",
"the",
"SPARK_HOME",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/find_spark_home.py#L28-L71 | train | Find the SPARK_HOME. | [
30522,
13366,
1035,
2424,
1035,
12125,
1035,
2188,
1006,
1007,
1024,
1000,
1000,
1000,
2424,
1996,
12125,
1035,
2188,
1012,
1000,
1000,
1000,
1001,
2065,
1996,
4044,
2038,
12125,
1035,
2188,
2275,
3404,
2009,
1012,
2065,
1000,
12125,
1035,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/ml/param/__init__.py | Params.extractParamMap | def extractParamMap(self, extra=None):
"""
Extracts the embedded default param values and user-supplied
values, and then merges them with extra values from input into
a flat param map, where the latter value is used if there exist
conflicts, i.e., with ordering: default param val... | python | def extractParamMap(self, extra=None):
"""
Extracts the embedded default param values and user-supplied
values, and then merges them with extra values from input into
a flat param map, where the latter value is used if there exist
conflicts, i.e., with ordering: default param val... | [
"def",
"extractParamMap",
"(",
"self",
",",
"extra",
"=",
"None",
")",
":",
"if",
"extra",
"is",
"None",
":",
"extra",
"=",
"dict",
"(",
")",
"paramMap",
"=",
"self",
".",
"_defaultParamMap",
".",
"copy",
"(",
")",
"paramMap",
".",
"update",
"(",
"se... | Extracts the embedded default param values and user-supplied
values, and then merges them with extra values from input into
a flat param map, where the latter value is used if there exist
conflicts, i.e., with ordering: default param values <
user-supplied values < extra.
:param... | [
"Extracts",
"the",
"embedded",
"default",
"param",
"values",
"and",
"user",
"-",
"supplied",
"values",
"and",
"then",
"merges",
"them",
"with",
"extra",
"values",
"from",
"input",
"into",
"a",
"flat",
"param",
"map",
"where",
"the",
"latter",
"value",
"is",
... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/ml/param/__init__.py#L350-L366 | train | Extracts the embedded default param values and user-supplied param values and merges them with extra values into the param map. | [
30522,
13366,
14817,
28689,
14760,
2361,
1006,
2969,
1010,
4469,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
27059,
1996,
11157,
12398,
11498,
2213,
5300,
1998,
5310,
1011,
8127,
5300,
1010,
1998,
2059,
13590,
30524,
8127,
5300,
1026,
4469,
1... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
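A hedged sketch of extractParamMap using an arbitrary ML stage (Binarizer) as the Params instance; the threshold and column names are assumptions:

    from pyspark.sql import SparkSession
    from pyspark.ml.feature import Binarizer

    spark = SparkSession.builder.master("local[1]").appName("param-map-sketch").getOrCreate()

    binarizer = Binarizer(threshold=0.5, inputCol="features", outputCol="binary")
    # Defaults and user-supplied values flattened into one map; user values win on conflict.
    param_map = binarizer.extractParamMap()
    print(param_map[binarizer.threshold])  # 0.5

    spark.stop()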
pandas-dev/pandas | pandas/core/indexes/base.py | Index._try_convert_to_int_index | def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
R... | python | def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
R... | [
"def",
"_try_convert_to_int_index",
"(",
"cls",
",",
"data",
",",
"copy",
",",
"name",
",",
"dtype",
")",
":",
"from",
".",
"numeric",
"import",
"Int64Index",
",",
"UInt64Index",
"if",
"not",
"is_unsigned_integer_dtype",
"(",
"dtype",
")",
":",
"# skip int64 c... | Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index... | [
"Attempt",
"to",
"convert",
"an",
"array",
"of",
"data",
"into",
"an",
"integer",
"index",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L3746-L3786 | train | Try to convert an array of data into an integer index. | [
30522,
13366,
1035,
3046,
1035,
10463,
1035,
2000,
1035,
20014,
1035,
5950,
1006,
18856,
2015,
1010,
2951,
1010,
6100,
1010,
2171,
1010,
26718,
18863,
1007,
1024,
1000,
1000,
1000,
3535,
2000,
10463,
2019,
9140,
1997,
2951,
2046,
2019,
1610... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
pandas-dev/pandas | pandas/core/indexes/base.py | Index._get_reconciled_name_object | def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = get_op_result_name(self, other)
if self.name != name:
return se... | python | def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = get_op_result_name(self, other)
if self.name != name:
return se... | [
"def",
"_get_reconciled_name_object",
"(",
"self",
",",
"other",
")",
":",
"name",
"=",
"get_op_result_name",
"(",
"self",
",",
"other",
")",
"if",
"self",
".",
"name",
"!=",
"name",
":",
"return",
"self",
".",
"_shallow_copy",
"(",
"name",
"=",
"name",
... | If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self. | [
"If",
"the",
"result",
"of",
"a",
"set",
"operation",
"will",
"be",
"self",
"return",
"self",
"unless",
"the",
"name",
"changes",
"in",
"which",
"case",
"make",
"a",
"shallow",
"copy",
"of",
"self",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L2234-L2243 | train | Returns self when a set operation would leave the name unchanged; otherwise returns a shallow copy of self with the reconciled name. | [
30522,
13366,
1035,
2131,
1035,
28348,
1035,
2171,
1035,
4874,
1006,
2969,
1010,
2060,
1007,
1024,
1000,
1000,
1000,
2065,
1996,
2765,
1997,
1037,
2275,
3169,
2097,
2022,
2969,
1010,
2709,
2969,
1010,
4983,
1996,
2171,
3431,
1010,
1999,
2... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/session.py | SparkSession.getActiveSession | def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
... | python | def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
... | [
"def",
"getActiveSession",
"(",
"cls",
")",
":",
"from",
"pyspark",
"import",
"SparkContext",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"if",
"sc",
"is",
"None",
":",
"return",
"None",
"else",
":",
"if",
"sc",
".",
"_jvm",
".",
"SparkSession",... | Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
>>> df.select("age").collect()
[Row(age... | [
"Returns",
"the",
"active",
"SparkSession",
"for",
"the",
"current",
"thread",
"returned",
"by",
"the",
"builder",
".",
">>>",
"s",
"=",
"SparkSession",
".",
"getActiveSession",
"()",
">>>",
"l",
"=",
"[",
"(",
"Alice",
"1",
")",
"]",
">>>",
"rdd",
"=",
... | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L263-L282 | train | Returns the active SparkSession for the current thread. | [
30522,
13366,
2131,
19620,
8583,
10992,
1006,
18856,
2015,
1007,
1024,
1000,
1000,
1000,
5651,
1996,
3161,
12300,
7971,
3258,
2005,
1996,
2783,
11689,
1010,
2513,
2011,
1996,
12508,
1012,
1028,
1028,
1028,
1055,
1027,
12300,
7971,
3258,
101... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
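A short sketch of SparkSession.getActiveSession; availability depends on the Spark version (it is documented at the commit referenced above), and the app name is an assumption:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").appName("active-session-sketch").getOrCreate()

    # Once a session has been started in this thread, the builder registers it as active.
    active = SparkSession.getActiveSession()
    print(active is not None)  # True

    spark.stop()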
pandas-dev/pandas | pandas/_config/config.py | _build_option_description | def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = '{k} '.format(k=k)
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description avail... | python | def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = '{k} '.format(k=k)
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description avail... | [
"def",
"_build_option_description",
"(",
"k",
")",
":",
"o",
"=",
"_get_registered_option",
"(",
"k",
")",
"d",
"=",
"_get_deprecated_option",
"(",
"k",
")",
"s",
"=",
"'{k} '",
".",
"format",
"(",
"k",
"=",
"k",
")",
"if",
"o",
".",
"doc",
":",
"s",... | Builds a formatted description of a registered option and prints it | [
"Builds",
"a",
"formatted",
"description",
"of",
"a",
"registered",
"option",
"and",
"prints",
"it"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/config.py#L636-L659 | train | Builds a formatted description of a registered option and prints it | [
30522,
13366,
1035,
3857,
1035,
5724,
1035,
6412,
1006,
1047,
1007,
1024,
1000,
1000,
1000,
16473,
1037,
4289,
3064,
6412,
1997,
1037,
5068,
5724,
1998,
11204,
2009,
1000,
1000,
1000,
1051,
1027,
1035,
2131,
1035,
5068,
1035,
5724,
1006,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
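_build_option_description is internal to the pandas config machinery; the description it builds is what the public describe_option prints. A minimal illustration with an arbitrary registered option:

    import pandas as pd

    # Prints the option's description, current value, and default.
    pd.describe_option("display.max_rows")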
apache/spark | python/pyspark/mllib/classification.py | NaiveBayes.train | def train(cls, data, lambda_=1.0):
"""
Train a Naive Bayes model given an RDD of (label, features)
vectors.
This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which
can handle all kinds of discrete data. For example, by
converting documents into TF-IDF vectors, i... | python | def train(cls, data, lambda_=1.0):
"""
Train a Naive Bayes model given an RDD of (label, features)
vectors.
This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which
can handle all kinds of discrete data. For example, by
converting documents into TF-IDF vectors, i... | [
"def",
"train",
"(",
"cls",
",",
"data",
",",
"lambda_",
"=",
"1.0",
")",
":",
"first",
"=",
"data",
".",
"first",
"(",
")",
"if",
"not",
"isinstance",
"(",
"first",
",",
"LabeledPoint",
")",
":",
"raise",
"ValueError",
"(",
"\"`data` should be an RDD of... | Train a Naive Bayes model given an RDD of (label, features)
vectors.
This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which
can handle all kinds of discrete data. For example, by
converting documents into TF-IDF vectors, it can be used for
document classification. By m... | [
"Train",
"a",
"Naive",
"Bayes",
"model",
"given",
"an",
"RDD",
"of",
"(",
"label",
"features",
")",
"vectors",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/classification.py#L657-L679 | train | Train a Naive Bayes model given an RDD of LabeledPoint vectors. | [
30522,
13366,
3345,
1006,
18856,
2015,
1010,
2951,
1010,
23375,
1035,
1027,
1015,
1012,
1014,
1007,
1024,
1000,
1000,
1000,
3345,
1037,
15743,
3016,
2229,
2944,
2445,
2019,
16428,
2094,
1997,
1006,
3830,
1010,
2838,
1007,
19019,
1012,
2023,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
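A small end-to-end sketch of NaiveBayes.train from pyspark.mllib; the three labeled points are made-up toy data:

    from pyspark import SparkContext
    from pyspark.mllib.classification import NaiveBayes
    from pyspark.mllib.linalg import Vectors
    from pyspark.mllib.regression import LabeledPoint

    sc = SparkContext("local[1]", "naive-bayes-sketch")
    data = sc.parallelize([
        LabeledPoint(0.0, Vectors.dense([0.0, 0.0])),
        LabeledPoint(0.0, Vectors.dense([0.0, 1.0])),
        LabeledPoint(1.0, Vectors.dense([1.0, 0.0])),
    ])

    # Multinomial Naive Bayes with additive smoothing lambda_ = 1.0.
    model = NaiveBayes.train(data, lambda_=1.0)
    print(model.predict(Vectors.dense([1.0, 0.0])))  # expected 1.0

    sc.stop()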
pandas-dev/pandas | pandas/core/internals/blocks.py | Block.concat_same_type | def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._concatenator([blk.values for blk in to_concat],
axis=self.ndim - 1)
return self.make_block_same_class(
... | python | def concat_same_type(self, to_concat, placement=None):
"""
Concatenate list of single blocks of the same type.
"""
values = self._concatenator([blk.values for blk in to_concat],
axis=self.ndim - 1)
return self.make_block_same_class(
... | [
"def",
"concat_same_type",
"(",
"self",
",",
"to_concat",
",",
"placement",
"=",
"None",
")",
":",
"values",
"=",
"self",
".",
"_concatenator",
"(",
"[",
"blk",
".",
"values",
"for",
"blk",
"in",
"to_concat",
"]",
",",
"axis",
"=",
"self",
".",
"ndim",... | Concatenate list of single blocks of the same type. | [
"Concatenate",
"list",
"of",
"single",
"blocks",
"of",
"the",
"same",
"type",
"."
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L308-L315 | train | Concatenate a list of single blocks of the same type. | [
30522,
13366,
9530,
11266,
1035,
2168,
1035,
2828,
1006,
2969,
1010,
2000,
1035,
9530,
11266,
1010,
11073,
1027,
3904,
1007,
1024,
1000,
1000,
1000,
9530,
16280,
12556,
2862,
1997,
2309,
5991,
1997,
1996,
2168,
2828,
1012,
1000,
1000,
1000,... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
apache/spark | python/pyspark/sql/streaming.py | DataStreamWriter.start | def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None,
**options):
"""Streams the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the d... | python | def start(self, path=None, format=None, outputMode=None, partitionBy=None, queryName=None,
**options):
"""Streams the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the d... | [
"def",
"start",
"(",
"self",
",",
"path",
"=",
"None",
",",
"format",
"=",
"None",
",",
"outputMode",
"=",
"None",
",",
"partitionBy",
"=",
"None",
",",
"queryName",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"self",
".",
"options",
"(",
"*",... | Streams the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
.. note:: Evolving.
:para... | [
"Streams",
"the",
"contents",
"of",
"the",
":",
"class",
":",
"DataFrame",
"to",
"a",
"data",
"source",
"."
] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L1073-L1128 | train | Streams the contents of the DataFrame to a data source. | [
30522,
13366,
2707,
1006,
2969,
1010,
4130,
1027,
3904,
1010,
4289,
1027,
3904,
1010,
6434,
5302,
3207,
1027,
3904,
1010,
13571,
3762,
1027,
3904,
1010,
23032,
18442,
1027,
3904,
1010,
1008,
1008,
7047,
1007,
1024,
1000,
1000,
1000,
9199,
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1... | [
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100,
-100... |
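A hedged sketch of DataStreamWriter.start wired to the built-in "rate" source and "console" sink; the query name, rate, and ten-second run are assumptions chosen to keep the example self-terminating:

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").appName("stream-start-sketch").getOrCreate()

    # Toy pipeline: the built-in "rate" source streamed to the "console" sink.
    stream = spark.readStream.format("rate").option("rowsPerSecond", 1).load()
    query = (stream.writeStream
                   .outputMode("append")
                   .format("console")
                   .queryName("rate_to_console")
                   .start())

    query.awaitTermination(10)  # let it run for about ten seconds
    query.stop()
    spark.stop()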
apache/spark | python/pyspark/streaming/context.py | StreamingContext.getActiveOrCreate | def getActiveOrCreate(cls, checkpointPath, setupFunc):
"""
Either return the active StreamingContext (i.e. currently started but not stopped),
or recreate a StreamingContext from checkpoint data or create a new StreamingContext
using the provided setupFunc function. If the checkpointPath... | python | def getActiveOrCreate(cls, checkpointPath, setupFunc):
"""
Either return the active StreamingContext (i.e. currently started but not stopped),
or recreate a StreamingContext from checkpoint data or create a new StreamingContext
using the provided setupFunc function. If the checkpointPath... | ["def", "getActiveOrCreate", "(", "cls", ",", "checkpointPath", ",", "setupFunc", ")", ":", "if", "setupFunc", "is", "None", ":", "raise", "Exception", "(", "\"setupFunc cannot be None\"", ")", "activeContext", "=", "cls", ".", "getActive", "(", ")", "if", "act...] | Either return the active StreamingContext (i.e. currently started but not stopped),
or recreate a StreamingContext from checkpoint data or create a new StreamingContext
using the provided setupFunc function. If the checkpointPath is None or does not contain
valid checkpoint data, then setupFunc ... | ["Either", "return", "the", "active", "StreamingContext", "(", "i", ".", "e", ".", "currently", "started", "but", "not", "stopped", ")", "or", "recreate", "a", "StreamingContext", "from", "checkpoint", "data", "or", "create", "a", "new", "StreamingContext", "us...] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L144-L166 | train | Returns the active StreamingContext or creates a new StreamingContext if it does not exist. | [30522, 13366, 2131, 19620, 2953, 16748, 3686, 1006, 18856, 2015, 1010, 26520, 15069, 1010, 16437, 11263, 12273, 1007, 1024, 1000, 1000, 1000, 2593, 2709, 1996, 3161, 11058, 8663, 18209, 1006, 1045, 1012, 1041, 1012, 2747, 2318, 2021, 2025, ...] | [0, 0, 0, ...] | [1, 1, 1, ...] | [-100, -100, -100, ...]
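
The row above documents `StreamingContext.getActiveOrCreate`. A minimal usage sketch, assuming a socket text source on localhost:9999 and a local checkpoint directory; both are assumptions made for illustration:

```python
# Hedged usage sketch for StreamingContext.getActiveOrCreate.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext

CHECKPOINT_DIR = "/tmp/streaming-checkpoint"  # assumed path

def create_context():
    """setupFunc: builds a fresh StreamingContext when no valid checkpoint exists."""
    sc = SparkContext(appName="getActiveOrCreate-example")
    ssc = StreamingContext(sc, batchDuration=5)
    ssc.checkpoint(CHECKPOINT_DIR)
    lines = ssc.socketTextStream("localhost", 9999)  # assumed source
    lines.count().pprint()
    return ssc

# Returns the active context if one is running, otherwise recreates one from the
# checkpoint data, otherwise falls back to calling create_context().
ssc = StreamingContext.getActiveOrCreate(CHECKPOINT_DIR, create_context)
ssc.start()
ssc.awaitTerminationOrTimeout(30)
ssc.stop(stopSparkContext=True, stopGraceFully=True)
```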
apache/spark | python/pyspark/context.py | SparkContext._serialize_to_jvm | def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
"""
Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
:param data:
:param serializer:
:param reader_func: A functio... | python | def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
"""
Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
:param data:
:param serializer:
:param reader_func: A functio... | ["def", "_serialize_to_jvm", "(", "self", ",", "data", ",", "serializer", ",", "reader_func", ",", "createRDDServer", ")", ":", "if", "self", ".", "_encryption_enabled", ":", "# with encryption, we open a server in java and send the data directly", "server", "=", "createRD...] | Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
:param data:
:param serializer:
:param reader_func: A function which takes a filename and reads in the data in the jvm and
returns a JavaRDD. ... | ["Using", "py4j", "to", "send", "a", "large", "dataset", "to", "the", "jvm", "is", "really", "slow", "so", "we", "use", "either", "a", "file", "or", "a", "socket", "if", "we", "have", "encryption", "enabled", ".", ":", "param", "data", ":", ":", "para...] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/context.py#L531-L566 | train | Serialize data to the jvm and return a JavaRDD. | [30522, 13366, 1035, 7642, 4697, 1035, 2000, 1035, 1046, 2615, 2213, 1006, 2969, 1010, 2951, 1010, 7642, 17629, 1010, 8068, 1035, 4569, 2278, 1010, 3443, 4103, 5104, 2121, 6299, 1007, 1024, 1000, 1000, 1000, 2478, 1052, 2100, 2549, 3501, 2...] | [0, 0, 0, ...] | [1, 1, 1, ...] | [-100, -100, -100, ...]
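
The row above documents the internal helper `SparkContext._serialize_to_jvm`, whose docstring describes choosing between a temp-file handoff and an encrypted socket channel. The sketch below only illustrates that dispatch pattern in generic Python; it is not Spark's implementation, and `send_over_channel`/`read_file` are hypothetical stand-ins for the JVM-side hooks mentioned in the record:

```python
# Illustrative sketch (not Spark's actual code) of the file-vs-channel dispatch
# pattern described in the docstring above.
import os
import pickle
import tempfile

def serialize_to_backend(data, encryption_enabled, send_over_channel, read_file):
    """Dispatch between a temp-file handoff and a direct channel handoff.

    send_over_channel and read_file are hypothetical callables standing in for
    the reader_func/createRDDServer hooks; they are not PySpark APIs.
    """
    payload = pickle.dumps(data)  # stand-in for the PySpark serializer
    if encryption_enabled:
        # With encryption, hand the bytes straight to the receiving channel.
        return send_over_channel(payload)
    # Without encryption, write to a temp file and let the reader pick it up.
    fd, path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, "wb") as f:
            f.write(payload)
        return read_file(path)
    finally:
        os.unlink(path)
```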
apache/spark | python/pyspark/mllib/evaluation.py | MultilabelMetrics.precision | def precision(self, label=None):
"""
Returns precision or precision for a given label (category) if specified.
"""
if label is None:
return self.call("precision")
else:
return self.call("precision", float(label)) | python | def precision(self, label=None):
"""
Returns precision or precision for a given label (category) if specified.
"""
if label is None:
return self.call("precision")
else:
return self.call("precision", float(label)) | ["def", "precision", "(", "self", ",", "label", "=", "None", ")", ":", "if", "label", "is", "None", ":", "return", "self", ".", "call", "(", "\"precision\"", ")", "else", ":", "return", "self", ".", "call", "(", "\"precision\"", ",", "float", "(", "la...] | Returns precision or precision for a given label (category) if specified. | ["Returns", "precision", "or", "precision", "for", "a", "given", "label", "(", "category", ")", "if", "specified", "."] | 618d6bff71073c8c93501ab7392c3cc579730f0b | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/evaluation.py#L504-L511 | train | Returns precision for a given label | [30522, 13366, 11718, 1006, 2969, 1010, 3830, 1027, 3904, 1007, 1024, 1000, 1000, 1000, 5651, 11718, 2030, 11718, 2005, 1037, 2445, 3830, 1006, 4696, 1007, 2065, 9675, 1012, 1000, 1000, 1000, 2065, 3830, 2003, 3904, 1024, 2709, 2969, 1012, ...] | [0, 0, 0, ...] | [1, 1, 1, ...] | [-100, -100, -100, ...]
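
The row above documents `MultilabelMetrics.precision`. A minimal usage sketch; the prediction/label pairs are made-up example data, not taken from the dataset:

```python
# Hedged usage sketch for MultilabelMetrics.precision.
from pyspark import SparkContext
from pyspark.mllib.evaluation import MultilabelMetrics

sc = SparkContext(appName="multilabel-precision-example")

# Each element is (predicted labels, true labels), both given as lists of floats.
prediction_and_labels = sc.parallelize([
    ([0.0, 1.0], [0.0, 2.0]),
    ([0.0, 2.0], [0.0, 1.0]),
    ([2.0], [2.0]),
])

metrics = MultilabelMetrics(prediction_and_labels)
print(metrics.precision())     # overall precision
print(metrics.precision(0.0))  # precision for label/category 0.0

sc.stop()
```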