repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
graphql-python/graphql-core | graphql/utils/suggestion_list.py | lexical_distance | def lexical_distance(a, b):
"""
Computes the lexical distance between strings A and B.
The "distance" between two strings is given by counting the minimum number
of edits needed to transform string A into string B. An edit can be an
insertion, deletion, or substitution of a single character, or a swap of two
adjacent characters.
This distance can be useful for detecting typos in input or sorting
@returns distance in number of edits
"""
d = [[i] for i in range(len(a) + 1)] or []
d_len = len(d) or 1
for i in range(d_len):
for j in range(1, len(b) + 1):
if i == 0:
d[i].append(j)
else:
d[i].append(0)
for i in range(1, len(a) + 1):
for j in range(1, len(b) + 1):
cost = 0 if a[i - 1] == b[j - 1] else 1
d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
if i > 1 and j < 1 and a[i - 1] == b[j - 2] and a[i - 2] == b[j - 1]:
d[i][j] = min(d[i][j], d[i - 2][j - 2] + cost)
return d[len(a)][len(b)] | python | def lexical_distance(a, b):
"""
Computes the lexical distance between strings A and B.
The "distance" between two strings is given by counting the minimum number
of edits needed to transform string A into string B. An edit can be an
insertion, deletion, or substitution of a single character, or a swap of two
adjacent characters.
This distance can be useful for detecting typos in input or sorting
@returns distance in number of edits
"""
d = [[i] for i in range(len(a) + 1)] or []
d_len = len(d) or 1
for i in range(d_len):
for j in range(1, len(b) + 1):
if i == 0:
d[i].append(j)
else:
d[i].append(0)
for i in range(1, len(a) + 1):
for j in range(1, len(b) + 1):
cost = 0 if a[i - 1] == b[j - 1] else 1
d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + cost)
if i > 1 and j < 1 and a[i - 1] == b[j - 2] and a[i - 2] == b[j - 1]:
d[i][j] = min(d[i][j], d[i - 2][j - 2] + cost)
return d[len(a)][len(b)] | [
"def",
"lexical_distance",
"(",
"a",
",",
"b",
")",
":",
"d",
"=",
"[",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"a",
")",
"+",
"1",
")",
"]",
"or",
"[",
"]",
"d_len",
"=",
"len",
"(",
"d",
")",
"or",
"1",
"for",
"i",
... | Computes the lexical distance between strings A and B.
The "distance" between two strings is given by counting the minimum number
of edits needed to transform string A into string B. An edit can be an
insertion, deletion, or substitution of a single character, or a swap of two
adjacent characters.
This distance can be useful for detecting typos in input or sorting
@returns distance in number of edits | [
"Computes",
"the",
"lexical",
"distance",
"between",
"strings",
"A",
"and",
"B",
".",
"The",
"distance",
"between",
"two",
"strings",
"is",
"given",
"by",
"counting",
"the",
"minimum",
"number",
"of",
"edits",
"needed",
"to",
"transform",
"string",
"A",
"int... | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/utils/suggestion_list.py#L23-L52 | train | 217,800 |
graphql-python/graphql-core | graphql/pyutils/version.py | get_complete_version | def get_complete_version(version=None):
"""Returns a tuple of the graphql version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from graphql import VERSION as version
else:
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
return version | python | def get_complete_version(version=None):
"""Returns a tuple of the graphql version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from graphql import VERSION as version
else:
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
return version | [
"def",
"get_complete_version",
"(",
"version",
"=",
"None",
")",
":",
"if",
"version",
"is",
"None",
":",
"from",
"graphql",
"import",
"VERSION",
"as",
"version",
"else",
":",
"assert",
"len",
"(",
"version",
")",
"==",
"5",
"assert",
"version",
"[",
"3"... | Returns a tuple of the graphql version. If version argument is non-empty,
then checks for correctness of the tuple provided. | [
"Returns",
"a",
"tuple",
"of",
"the",
"graphql",
"version",
".",
"If",
"version",
"argument",
"is",
"non",
"-",
"empty",
"then",
"checks",
"for",
"correctness",
"of",
"the",
"tuple",
"provided",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/pyutils/version.py#L40-L50 | train | 217,801 |
graphql-python/graphql-core | graphql/language/lexer.py | read_token | def read_token(source, from_position):
# type: (Source, int) -> Token
"""Gets the next token from the source starting at the given position.
This skips over whitespace and comments until it finds the next lexable
token, then lexes punctuators immediately or calls the appropriate
helper fucntion for more complicated tokens."""
body = source.body
body_length = len(body)
position = position_after_whitespace(body, from_position)
if position >= body_length:
return Token(TokenKind.EOF, position, position)
code = char_code_at(body, position)
if code:
if code < 0x0020 and code not in (0x0009, 0x000A, 0x000D):
raise GraphQLSyntaxError(
source, position, u"Invalid character {}.".format(print_char_code(code))
)
kind = PUNCT_CODE_TO_KIND.get(code)
if kind is not None:
return Token(kind, position, position + 1)
if code == 46: # .
if (
char_code_at(body, position + 1)
== char_code_at(body, position + 2)
== 46
):
return Token(TokenKind.SPREAD, position, position + 3)
elif 65 <= code <= 90 or code == 95 or 97 <= code <= 122:
# A-Z, _, a-z
return read_name(source, position)
elif code == 45 or 48 <= code <= 57: # -, 0-9
return read_number(source, position, code)
elif code == 34: # "
return read_string(source, position)
raise GraphQLSyntaxError(
source, position, u"Unexpected character {}.".format(print_char_code(code))
) | python | def read_token(source, from_position):
# type: (Source, int) -> Token
"""Gets the next token from the source starting at the given position.
This skips over whitespace and comments until it finds the next lexable
token, then lexes punctuators immediately or calls the appropriate
helper fucntion for more complicated tokens."""
body = source.body
body_length = len(body)
position = position_after_whitespace(body, from_position)
if position >= body_length:
return Token(TokenKind.EOF, position, position)
code = char_code_at(body, position)
if code:
if code < 0x0020 and code not in (0x0009, 0x000A, 0x000D):
raise GraphQLSyntaxError(
source, position, u"Invalid character {}.".format(print_char_code(code))
)
kind = PUNCT_CODE_TO_KIND.get(code)
if kind is not None:
return Token(kind, position, position + 1)
if code == 46: # .
if (
char_code_at(body, position + 1)
== char_code_at(body, position + 2)
== 46
):
return Token(TokenKind.SPREAD, position, position + 3)
elif 65 <= code <= 90 or code == 95 or 97 <= code <= 122:
# A-Z, _, a-z
return read_name(source, position)
elif code == 45 or 48 <= code <= 57: # -, 0-9
return read_number(source, position, code)
elif code == 34: # "
return read_string(source, position)
raise GraphQLSyntaxError(
source, position, u"Unexpected character {}.".format(print_char_code(code))
) | [
"def",
"read_token",
"(",
"source",
",",
"from_position",
")",
":",
"# type: (Source, int) -> Token",
"body",
"=",
"source",
".",
"body",
"body_length",
"=",
"len",
"(",
"body",
")",
"position",
"=",
"position_after_whitespace",
"(",
"body",
",",
"from_position",
... | Gets the next token from the source starting at the given position.
This skips over whitespace and comments until it finds the next lexable
token, then lexes punctuators immediately or calls the appropriate
helper fucntion for more complicated tokens. | [
"Gets",
"the",
"next",
"token",
"from",
"the",
"source",
"starting",
"at",
"the",
"given",
"position",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/lexer.py#L152-L198 | train | 217,802 |
graphql-python/graphql-core | graphql/language/lexer.py | position_after_whitespace | def position_after_whitespace(body, start_position):
# type: (str, int) -> int
"""Reads from body starting at start_position until it finds a
non-whitespace or commented character, then returns the position of
that character for lexing."""
body_length = len(body)
position = start_position
while position < body_length:
code = char_code_at(body, position)
if code in ignored_whitespace_characters:
position += 1
elif code == 35: # #, skip comments
position += 1
while position < body_length:
code = char_code_at(body, position)
if not (
code is not None
and (code > 0x001F or code == 0x0009)
and code not in (0x000A, 0x000D)
):
break
position += 1
else:
break
return position | python | def position_after_whitespace(body, start_position):
# type: (str, int) -> int
"""Reads from body starting at start_position until it finds a
non-whitespace or commented character, then returns the position of
that character for lexing."""
body_length = len(body)
position = start_position
while position < body_length:
code = char_code_at(body, position)
if code in ignored_whitespace_characters:
position += 1
elif code == 35: # #, skip comments
position += 1
while position < body_length:
code = char_code_at(body, position)
if not (
code is not None
and (code > 0x001F or code == 0x0009)
and code not in (0x000A, 0x000D)
):
break
position += 1
else:
break
return position | [
"def",
"position_after_whitespace",
"(",
"body",
",",
"start_position",
")",
":",
"# type: (str, int) -> int",
"body_length",
"=",
"len",
"(",
"body",
")",
"position",
"=",
"start_position",
"while",
"position",
"<",
"body_length",
":",
"code",
"=",
"char_code_at",
... | Reads from body starting at start_position until it finds a
non-whitespace or commented character, then returns the position of
that character for lexing. | [
"Reads",
"from",
"body",
"starting",
"at",
"start_position",
"until",
"it",
"finds",
"a",
"non",
"-",
"whitespace",
"or",
"commented",
"character",
"then",
"returns",
"the",
"position",
"of",
"that",
"character",
"for",
"lexing",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/lexer.py#L217-L243 | train | 217,803 |
graphql-python/graphql-core | graphql/language/lexer.py | read_number | def read_number(source, start, first_code):
# type: (Source, int, Optional[int]) -> Token
r"""Reads a number token from the source file, either a float
or an int depending on whether a decimal point appears.
Int: -?(0|[1-9][0-9]*)
Float: -?(0|[1-9][0-9]*)(\.[0-9]+)?((E|e)(+|-)?[0-9]+)?"""
code = first_code
body = source.body
position = start
is_float = False
if code == 45: # -
position += 1
code = char_code_at(body, position)
if code == 48: # 0
position += 1
code = char_code_at(body, position)
if code is not None and 48 <= code <= 57:
raise GraphQLSyntaxError(
source,
position,
u"Invalid number, unexpected digit after 0: {}.".format(
print_char_code(code)
),
)
else:
position = read_digits(source, position, code)
code = char_code_at(body, position)
if code == 46: # .
is_float = True
position += 1
code = char_code_at(body, position)
position = read_digits(source, position, code)
code = char_code_at(body, position)
if code in (69, 101): # E e
is_float = True
position += 1
code = char_code_at(body, position)
if code in (43, 45): # + -
position += 1
code = char_code_at(body, position)
position = read_digits(source, position, code)
return Token(
TokenKind.FLOAT if is_float else TokenKind.INT,
start,
position,
body[start:position],
) | python | def read_number(source, start, first_code):
# type: (Source, int, Optional[int]) -> Token
r"""Reads a number token from the source file, either a float
or an int depending on whether a decimal point appears.
Int: -?(0|[1-9][0-9]*)
Float: -?(0|[1-9][0-9]*)(\.[0-9]+)?((E|e)(+|-)?[0-9]+)?"""
code = first_code
body = source.body
position = start
is_float = False
if code == 45: # -
position += 1
code = char_code_at(body, position)
if code == 48: # 0
position += 1
code = char_code_at(body, position)
if code is not None and 48 <= code <= 57:
raise GraphQLSyntaxError(
source,
position,
u"Invalid number, unexpected digit after 0: {}.".format(
print_char_code(code)
),
)
else:
position = read_digits(source, position, code)
code = char_code_at(body, position)
if code == 46: # .
is_float = True
position += 1
code = char_code_at(body, position)
position = read_digits(source, position, code)
code = char_code_at(body, position)
if code in (69, 101): # E e
is_float = True
position += 1
code = char_code_at(body, position)
if code in (43, 45): # + -
position += 1
code = char_code_at(body, position)
position = read_digits(source, position, code)
return Token(
TokenKind.FLOAT if is_float else TokenKind.INT,
start,
position,
body[start:position],
) | [
"def",
"read_number",
"(",
"source",
",",
"start",
",",
"first_code",
")",
":",
"# type: (Source, int, Optional[int]) -> Token",
"code",
"=",
"first_code",
"body",
"=",
"source",
".",
"body",
"position",
"=",
"start",
"is_float",
"=",
"False",
"if",
"code",
"=="... | r"""Reads a number token from the source file, either a float
or an int depending on whether a decimal point appears.
Int: -?(0|[1-9][0-9]*)
Float: -?(0|[1-9][0-9]*)(\.[0-9]+)?((E|e)(+|-)?[0-9]+)? | [
"r",
"Reads",
"a",
"number",
"token",
"from",
"the",
"source",
"file",
"either",
"a",
"float",
"or",
"an",
"int",
"depending",
"on",
"whether",
"a",
"decimal",
"point",
"appears",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/lexer.py#L246-L301 | train | 217,804 |
graphql-python/graphql-core | graphql/language/lexer.py | read_string | def read_string(source, start):
# type: (Source, int) -> Token
"""Reads a string token from the source file.
"([^"\\\u000A\u000D\u2028\u2029]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*"
"""
body = source.body
body_length = len(body)
position = start + 1
chunk_start = position
code = 0 # type: Optional[int]
value = [] # type: List[str]
append = value.append
while position < body_length:
code = char_code_at(body, position)
if code in (
None,
# LineTerminator
0x000A,
0x000D,
# Quote
34,
):
break
if code < 0x0020 and code != 0x0009: # type: ignore
raise GraphQLSyntaxError(
source,
position,
u"Invalid character within String: {}.".format(print_char_code(code)),
)
position += 1
if code == 92: # \
append(body[chunk_start : position - 1])
code = char_code_at(body, position)
escaped = ESCAPED_CHAR_CODES.get(code) # type: ignore
if escaped is not None:
append(escaped)
elif code == 117: # u
char_code = uni_char_code(
char_code_at(body, position + 1) or 0,
char_code_at(body, position + 2) or 0,
char_code_at(body, position + 3) or 0,
char_code_at(body, position + 4) or 0,
)
if char_code < 0:
raise GraphQLSyntaxError(
source,
position,
u"Invalid character escape sequence: \\u{}.".format(
body[position + 1 : position + 5]
),
)
append(unichr(char_code))
position += 4
else:
raise GraphQLSyntaxError(
source,
position,
u"Invalid character escape sequence: \\{}.".format(
unichr(code) # type: ignore
),
)
position += 1
chunk_start = position
if code != 34: # Quote (")
raise GraphQLSyntaxError(source, position, "Unterminated string")
append(body[chunk_start:position])
return Token(TokenKind.STRING, start, position + 1, u"".join(value)) | python | def read_string(source, start):
# type: (Source, int) -> Token
"""Reads a string token from the source file.
"([^"\\\u000A\u000D\u2028\u2029]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*"
"""
body = source.body
body_length = len(body)
position = start + 1
chunk_start = position
code = 0 # type: Optional[int]
value = [] # type: List[str]
append = value.append
while position < body_length:
code = char_code_at(body, position)
if code in (
None,
# LineTerminator
0x000A,
0x000D,
# Quote
34,
):
break
if code < 0x0020 and code != 0x0009: # type: ignore
raise GraphQLSyntaxError(
source,
position,
u"Invalid character within String: {}.".format(print_char_code(code)),
)
position += 1
if code == 92: # \
append(body[chunk_start : position - 1])
code = char_code_at(body, position)
escaped = ESCAPED_CHAR_CODES.get(code) # type: ignore
if escaped is not None:
append(escaped)
elif code == 117: # u
char_code = uni_char_code(
char_code_at(body, position + 1) or 0,
char_code_at(body, position + 2) or 0,
char_code_at(body, position + 3) or 0,
char_code_at(body, position + 4) or 0,
)
if char_code < 0:
raise GraphQLSyntaxError(
source,
position,
u"Invalid character escape sequence: \\u{}.".format(
body[position + 1 : position + 5]
),
)
append(unichr(char_code))
position += 4
else:
raise GraphQLSyntaxError(
source,
position,
u"Invalid character escape sequence: \\{}.".format(
unichr(code) # type: ignore
),
)
position += 1
chunk_start = position
if code != 34: # Quote (")
raise GraphQLSyntaxError(source, position, "Unterminated string")
append(body[chunk_start:position])
return Token(TokenKind.STRING, start, position + 1, u"".join(value)) | [
"def",
"read_string",
"(",
"source",
",",
"start",
")",
":",
"# type: (Source, int) -> Token",
"body",
"=",
"source",
".",
"body",
"body_length",
"=",
"len",
"(",
"body",
")",
"position",
"=",
"start",
"+",
"1",
"chunk_start",
"=",
"position",
"code",
"=",
... | Reads a string token from the source file.
"([^"\\\u000A\u000D\u2028\u2029]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*" | [
"Reads",
"a",
"string",
"token",
"from",
"the",
"source",
"file",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/lexer.py#L339-L417 | train | 217,805 |
graphql-python/graphql-core | graphql/language/lexer.py | read_name | def read_name(source, position):
# type: (Source, int) -> Token
"""Reads an alphanumeric + underscore name from the source.
[_A-Za-z][_0-9A-Za-z]*"""
body = source.body
body_length = len(body)
end = position + 1
while end != body_length:
code = char_code_at(body, end)
if not (
code is not None
and (
code == 95
or 48 <= code <= 57 # _
or 65 <= code <= 90 # 0-9
or 97 <= code <= 122 # A-Z # a-z
)
):
break
end += 1
return Token(TokenKind.NAME, position, end, body[position:end]) | python | def read_name(source, position):
# type: (Source, int) -> Token
"""Reads an alphanumeric + underscore name from the source.
[_A-Za-z][_0-9A-Za-z]*"""
body = source.body
body_length = len(body)
end = position + 1
while end != body_length:
code = char_code_at(body, end)
if not (
code is not None
and (
code == 95
or 48 <= code <= 57 # _
or 65 <= code <= 90 # 0-9
or 97 <= code <= 122 # A-Z # a-z
)
):
break
end += 1
return Token(TokenKind.NAME, position, end, body[position:end]) | [
"def",
"read_name",
"(",
"source",
",",
"position",
")",
":",
"# type: (Source, int) -> Token",
"body",
"=",
"source",
".",
"body",
"body_length",
"=",
"len",
"(",
"body",
")",
"end",
"=",
"position",
"+",
"1",
"while",
"end",
"!=",
"body_length",
":",
"co... | Reads an alphanumeric + underscore name from the source.
[_A-Za-z][_0-9A-Za-z]* | [
"Reads",
"an",
"alphanumeric",
"+",
"underscore",
"name",
"from",
"the",
"source",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/lexer.py#L451-L475 | train | 217,806 |
graphql-python/graphql-core | graphql/execution/executor.py | complete_value | def complete_value(
exe_context, # type: ExecutionContext
return_type, # type: Any
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Any
"""
Implements the instructions for completeValue as defined in the
"Field entries" section of the spec.
If the field type is Non-Null, then this recursively completes the value for the inner type. It throws a field
error if that completion returns null, as per the "Nullability" section of the spec.
If the field type is a List, then this recursively completes the value for the inner type on each item in the
list.
If the field type is a Scalar or Enum, ensures the completed value is a legal value of the type by calling the
`serialize` method of GraphQL type definition.
If the field is an abstract type, determine the runtime type of the value and then complete based on that type.
Otherwise, the field type expects a sub-selection set, and will complete the value by evaluating all
sub-selections.
"""
# If field type is NonNull, complete for inner type, and throw field error
# if result is null.
if is_thenable(result):
return Promise.resolve(result).then(
lambda resolved: complete_value(
exe_context, return_type, field_asts, info, path, resolved
),
lambda error: Promise.rejected(
GraphQLLocatedError(field_asts, original_error=error, path=path)
),
)
# print return_type, type(result)
if isinstance(result, Exception):
raise GraphQLLocatedError(field_asts, original_error=result, path=path)
if isinstance(return_type, GraphQLNonNull):
return complete_nonnull_value(
exe_context, return_type, field_asts, info, path, result
)
# If result is null-like, return null.
if result is None:
return None
# If field type is List, complete each item in the list with the inner type
if isinstance(return_type, GraphQLList):
return complete_list_value(
exe_context, return_type, field_asts, info, path, result
)
# If field type is Scalar or Enum, serialize to a valid value, returning
# null if coercion is not possible.
if isinstance(return_type, (GraphQLScalarType, GraphQLEnumType)):
return complete_leaf_value(return_type, path, result)
if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
return complete_abstract_value(
exe_context, return_type, field_asts, info, path, result
)
if isinstance(return_type, GraphQLObjectType):
return complete_object_value(
exe_context, return_type, field_asts, info, path, result
)
assert False, u'Cannot complete value of unexpected type "{}".'.format(return_type) | python | def complete_value(
exe_context, # type: ExecutionContext
return_type, # type: Any
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Any
"""
Implements the instructions for completeValue as defined in the
"Field entries" section of the spec.
If the field type is Non-Null, then this recursively completes the value for the inner type. It throws a field
error if that completion returns null, as per the "Nullability" section of the spec.
If the field type is a List, then this recursively completes the value for the inner type on each item in the
list.
If the field type is a Scalar or Enum, ensures the completed value is a legal value of the type by calling the
`serialize` method of GraphQL type definition.
If the field is an abstract type, determine the runtime type of the value and then complete based on that type.
Otherwise, the field type expects a sub-selection set, and will complete the value by evaluating all
sub-selections.
"""
# If field type is NonNull, complete for inner type, and throw field error
# if result is null.
if is_thenable(result):
return Promise.resolve(result).then(
lambda resolved: complete_value(
exe_context, return_type, field_asts, info, path, resolved
),
lambda error: Promise.rejected(
GraphQLLocatedError(field_asts, original_error=error, path=path)
),
)
# print return_type, type(result)
if isinstance(result, Exception):
raise GraphQLLocatedError(field_asts, original_error=result, path=path)
if isinstance(return_type, GraphQLNonNull):
return complete_nonnull_value(
exe_context, return_type, field_asts, info, path, result
)
# If result is null-like, return null.
if result is None:
return None
# If field type is List, complete each item in the list with the inner type
if isinstance(return_type, GraphQLList):
return complete_list_value(
exe_context, return_type, field_asts, info, path, result
)
# If field type is Scalar or Enum, serialize to a valid value, returning
# null if coercion is not possible.
if isinstance(return_type, (GraphQLScalarType, GraphQLEnumType)):
return complete_leaf_value(return_type, path, result)
if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
return complete_abstract_value(
exe_context, return_type, field_asts, info, path, result
)
if isinstance(return_type, GraphQLObjectType):
return complete_object_value(
exe_context, return_type, field_asts, info, path, result
)
assert False, u'Cannot complete value of unexpected type "{}".'.format(return_type) | [
"def",
"complete_value",
"(",
"exe_context",
",",
"# type: ExecutionContext",
"return_type",
",",
"# type: Any",
"field_asts",
",",
"# type: List[Field]",
"info",
",",
"# type: ResolveInfo",
"path",
",",
"# type: List[Union[int, str]]",
"result",
",",
"# type: Any",
")",
... | Implements the instructions for completeValue as defined in the
"Field entries" section of the spec.
If the field type is Non-Null, then this recursively completes the value for the inner type. It throws a field
error if that completion returns null, as per the "Nullability" section of the spec.
If the field type is a List, then this recursively completes the value for the inner type on each item in the
list.
If the field type is a Scalar or Enum, ensures the completed value is a legal value of the type by calling the
`serialize` method of GraphQL type definition.
If the field is an abstract type, determine the runtime type of the value and then complete based on that type.
Otherwise, the field type expects a sub-selection set, and will complete the value by evaluating all
sub-selections. | [
"Implements",
"the",
"instructions",
"for",
"completeValue",
"as",
"defined",
"in",
"the",
"Field",
"entries",
"section",
"of",
"the",
"spec",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/executor.py#L498-L571 | train | 217,807 |
graphql-python/graphql-core | graphql/execution/executor.py | complete_list_value | def complete_list_value(
exe_context, # type: ExecutionContext
return_type, # type: GraphQLList
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> List[Any]
"""
Complete a list value by completing each item in the list with the inner type
"""
assert isinstance(result, Iterable), (
"User Error: expected iterable, but did not find one " + "for field {}.{}."
).format(info.parent_type, info.field_name)
item_type = return_type.of_type
completed_results = []
contains_promise = False
index = 0
for item in result:
completed_item = complete_value_catching_error(
exe_context, item_type, field_asts, info, path + [index], item
)
if not contains_promise and is_thenable(completed_item):
contains_promise = True
completed_results.append(completed_item)
index += 1
return Promise.all(completed_results) if contains_promise else completed_results | python | def complete_list_value(
exe_context, # type: ExecutionContext
return_type, # type: GraphQLList
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> List[Any]
"""
Complete a list value by completing each item in the list with the inner type
"""
assert isinstance(result, Iterable), (
"User Error: expected iterable, but did not find one " + "for field {}.{}."
).format(info.parent_type, info.field_name)
item_type = return_type.of_type
completed_results = []
contains_promise = False
index = 0
for item in result:
completed_item = complete_value_catching_error(
exe_context, item_type, field_asts, info, path + [index], item
)
if not contains_promise and is_thenable(completed_item):
contains_promise = True
completed_results.append(completed_item)
index += 1
return Promise.all(completed_results) if contains_promise else completed_results | [
"def",
"complete_list_value",
"(",
"exe_context",
",",
"# type: ExecutionContext",
"return_type",
",",
"# type: GraphQLList",
"field_asts",
",",
"# type: List[Field]",
"info",
",",
"# type: ResolveInfo",
"path",
",",
"# type: List[Union[int, str]]",
"result",
",",
"# type: An... | Complete a list value by completing each item in the list with the inner type | [
"Complete",
"a",
"list",
"value",
"by",
"completing",
"each",
"item",
"in",
"the",
"list",
"with",
"the",
"inner",
"type"
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/executor.py#L574-L605 | train | 217,808 |
graphql-python/graphql-core | graphql/execution/executor.py | complete_leaf_value | def complete_leaf_value(
return_type, # type: Union[GraphQLEnumType, GraphQLScalarType]
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Union[int, str, float, bool]
"""
Complete a Scalar or Enum by serializing to a valid value, returning null if serialization is not possible.
"""
assert hasattr(return_type, "serialize"), "Missing serialize method on type"
serialized_result = return_type.serialize(result)
if serialized_result is None:
raise GraphQLError(
('Expected a value of type "{}" but ' + "received: {}").format(
return_type, result
),
path=path,
)
return serialized_result | python | def complete_leaf_value(
return_type, # type: Union[GraphQLEnumType, GraphQLScalarType]
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Union[int, str, float, bool]
"""
Complete a Scalar or Enum by serializing to a valid value, returning null if serialization is not possible.
"""
assert hasattr(return_type, "serialize"), "Missing serialize method on type"
serialized_result = return_type.serialize(result)
if serialized_result is None:
raise GraphQLError(
('Expected a value of type "{}" but ' + "received: {}").format(
return_type, result
),
path=path,
)
return serialized_result | [
"def",
"complete_leaf_value",
"(",
"return_type",
",",
"# type: Union[GraphQLEnumType, GraphQLScalarType]",
"path",
",",
"# type: List[Union[int, str]]",
"result",
",",
"# type: Any",
")",
":",
"# type: (...) -> Union[int, str, float, bool]",
"assert",
"hasattr",
"(",
"return_typ... | Complete a Scalar or Enum by serializing to a valid value, returning null if serialization is not possible. | [
"Complete",
"a",
"Scalar",
"or",
"Enum",
"by",
"serializing",
"to",
"a",
"valid",
"value",
"returning",
"null",
"if",
"serialization",
"is",
"not",
"possible",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/executor.py#L608-L627 | train | 217,809 |
graphql-python/graphql-core | graphql/execution/executor.py | complete_abstract_value | def complete_abstract_value(
exe_context, # type: ExecutionContext
return_type, # type: Union[GraphQLInterfaceType, GraphQLUnionType]
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Dict[str, Any]
"""
Complete an value of an abstract type by determining the runtime type of that value, then completing based
on that type.
"""
runtime_type = None # type: Union[str, GraphQLObjectType, None]
# Field type must be Object, Interface or Union and expect sub-selections.
if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
if return_type.resolve_type:
runtime_type = return_type.resolve_type(result, info)
else:
runtime_type = get_default_resolve_type_fn(result, info, return_type)
if isinstance(runtime_type, string_types):
runtime_type = info.schema.get_type(runtime_type) # type: ignore
if not isinstance(runtime_type, GraphQLObjectType):
raise GraphQLError(
(
"Abstract type {} must resolve to an Object type at runtime "
+ 'for field {}.{} with value "{}", received "{}".'
).format(
return_type, info.parent_type, info.field_name, result, runtime_type
),
field_asts,
)
if not exe_context.schema.is_possible_type(return_type, runtime_type):
raise GraphQLError(
u'Runtime Object type "{}" is not a possible type for "{}".'.format(
runtime_type, return_type
),
field_asts,
)
return complete_object_value(
exe_context, runtime_type, field_asts, info, path, result
) | python | def complete_abstract_value(
exe_context, # type: ExecutionContext
return_type, # type: Union[GraphQLInterfaceType, GraphQLUnionType]
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Dict[str, Any]
"""
Complete an value of an abstract type by determining the runtime type of that value, then completing based
on that type.
"""
runtime_type = None # type: Union[str, GraphQLObjectType, None]
# Field type must be Object, Interface or Union and expect sub-selections.
if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
if return_type.resolve_type:
runtime_type = return_type.resolve_type(result, info)
else:
runtime_type = get_default_resolve_type_fn(result, info, return_type)
if isinstance(runtime_type, string_types):
runtime_type = info.schema.get_type(runtime_type) # type: ignore
if not isinstance(runtime_type, GraphQLObjectType):
raise GraphQLError(
(
"Abstract type {} must resolve to an Object type at runtime "
+ 'for field {}.{} with value "{}", received "{}".'
).format(
return_type, info.parent_type, info.field_name, result, runtime_type
),
field_asts,
)
if not exe_context.schema.is_possible_type(return_type, runtime_type):
raise GraphQLError(
u'Runtime Object type "{}" is not a possible type for "{}".'.format(
runtime_type, return_type
),
field_asts,
)
return complete_object_value(
exe_context, runtime_type, field_asts, info, path, result
) | [
"def",
"complete_abstract_value",
"(",
"exe_context",
",",
"# type: ExecutionContext",
"return_type",
",",
"# type: Union[GraphQLInterfaceType, GraphQLUnionType]",
"field_asts",
",",
"# type: List[Field]",
"info",
",",
"# type: ResolveInfo",
"path",
",",
"# type: List[Union[int, st... | Complete an value of an abstract type by determining the runtime type of that value, then completing based
on that type. | [
"Complete",
"an",
"value",
"of",
"an",
"abstract",
"type",
"by",
"determining",
"the",
"runtime",
"type",
"of",
"that",
"value",
"then",
"completing",
"based",
"on",
"that",
"type",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/executor.py#L630-L676 | train | 217,810 |
graphql-python/graphql-core | graphql/execution/executor.py | complete_object_value | def complete_object_value(
exe_context, # type: ExecutionContext
return_type, # type: GraphQLObjectType
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Dict[str, Any]
"""
Complete an Object value by evaluating all sub-selections.
"""
if return_type.is_type_of and not return_type.is_type_of(result, info):
raise GraphQLError(
u'Expected value of type "{}" but got: {}.'.format(
return_type, type(result).__name__
),
field_asts,
)
# Collect sub-fields to execute to complete this value.
subfield_asts = exe_context.get_sub_fields(return_type, field_asts)
return execute_fields(exe_context, return_type, result, subfield_asts, path, info) | python | def complete_object_value(
exe_context, # type: ExecutionContext
return_type, # type: GraphQLObjectType
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Dict[str, Any]
"""
Complete an Object value by evaluating all sub-selections.
"""
if return_type.is_type_of and not return_type.is_type_of(result, info):
raise GraphQLError(
u'Expected value of type "{}" but got: {}.'.format(
return_type, type(result).__name__
),
field_asts,
)
# Collect sub-fields to execute to complete this value.
subfield_asts = exe_context.get_sub_fields(return_type, field_asts)
return execute_fields(exe_context, return_type, result, subfield_asts, path, info) | [
"def",
"complete_object_value",
"(",
"exe_context",
",",
"# type: ExecutionContext",
"return_type",
",",
"# type: GraphQLObjectType",
"field_asts",
",",
"# type: List[Field]",
"info",
",",
"# type: ResolveInfo",
"path",
",",
"# type: List[Union[int, str]]",
"result",
",",
"# ... | Complete an Object value by evaluating all sub-selections. | [
"Complete",
"an",
"Object",
"value",
"by",
"evaluating",
"all",
"sub",
"-",
"selections",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/executor.py#L692-L714 | train | 217,811 |
graphql-python/graphql-core | graphql/execution/executor.py | complete_nonnull_value | def complete_nonnull_value(
exe_context, # type: ExecutionContext
return_type, # type: GraphQLNonNull
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Any
"""
Complete a NonNull value by completing the inner type
"""
completed = complete_value(
exe_context, return_type.of_type, field_asts, info, path, result
)
if completed is None:
raise GraphQLError(
"Cannot return null for non-nullable field {}.{}.".format(
info.parent_type, info.field_name
),
field_asts,
path=path,
)
return completed | python | def complete_nonnull_value(
exe_context, # type: ExecutionContext
return_type, # type: GraphQLNonNull
field_asts, # type: List[Field]
info, # type: ResolveInfo
path, # type: List[Union[int, str]]
result, # type: Any
):
# type: (...) -> Any
"""
Complete a NonNull value by completing the inner type
"""
completed = complete_value(
exe_context, return_type.of_type, field_asts, info, path, result
)
if completed is None:
raise GraphQLError(
"Cannot return null for non-nullable field {}.{}.".format(
info.parent_type, info.field_name
),
field_asts,
path=path,
)
return completed | [
"def",
"complete_nonnull_value",
"(",
"exe_context",
",",
"# type: ExecutionContext",
"return_type",
",",
"# type: GraphQLNonNull",
"field_asts",
",",
"# type: List[Field]",
"info",
",",
"# type: ResolveInfo",
"path",
",",
"# type: List[Union[int, str]]",
"result",
",",
"# ty... | Complete a NonNull value by completing the inner type | [
"Complete",
"a",
"NonNull",
"value",
"by",
"completing",
"the",
"inner",
"type"
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/executor.py#L717-L741 | train | 217,812 |
graphql-python/graphql-core | graphql/utils/value_from_ast.py | value_from_ast | def value_from_ast(value_ast, type, variables=None):
# type: (Optional[Node], GraphQLType, Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]) -> Union[List, Dict, int, float, bool, str, None]
"""Given a type and a value AST node known to match this type, build a
runtime value."""
if isinstance(type, GraphQLNonNull):
# Note: we're not checking that the result of coerceValueAST is non-null.
# We're assuming that this query has been validated and the value used here is of the correct type.
return value_from_ast(value_ast, type.of_type, variables)
if value_ast is None:
return None
if isinstance(value_ast, ast.Variable):
variable_name = value_ast.name.value
if not variables or variable_name not in variables:
return None
# Note: we're not doing any checking that this variable is correct. We're assuming that this query
# has been validated and the variable usage here is of the correct type.
return variables.get(variable_name)
if isinstance(type, GraphQLList):
item_type = type.of_type
if isinstance(value_ast, ast.ListValue):
return [
value_from_ast(item_ast, item_type, variables)
for item_ast in value_ast.values
]
else:
return [value_from_ast(value_ast, item_type, variables)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
if not isinstance(value_ast, ast.ObjectValue):
return None
field_asts = {}
for field_ast in value_ast.fields:
field_asts[field_ast.name.value] = field_ast
obj = {}
for field_name, field in fields.items():
if field_name not in field_asts:
if field.default_value is not None:
# We use out_name as the output name for the
# dict if exists
obj[field.out_name or field_name] = field.default_value
continue
field_ast = field_asts[field_name]
field_value_ast = field_ast.value
field_value = value_from_ast(field_value_ast, field.type, variables)
# We use out_name as the output name for the
# dict if exists
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_literal(value_ast) | python | def value_from_ast(value_ast, type, variables=None):
# type: (Optional[Node], GraphQLType, Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]) -> Union[List, Dict, int, float, bool, str, None]
"""Given a type and a value AST node known to match this type, build a
runtime value."""
if isinstance(type, GraphQLNonNull):
# Note: we're not checking that the result of coerceValueAST is non-null.
# We're assuming that this query has been validated and the value used here is of the correct type.
return value_from_ast(value_ast, type.of_type, variables)
if value_ast is None:
return None
if isinstance(value_ast, ast.Variable):
variable_name = value_ast.name.value
if not variables or variable_name not in variables:
return None
# Note: we're not doing any checking that this variable is correct. We're assuming that this query
# has been validated and the variable usage here is of the correct type.
return variables.get(variable_name)
if isinstance(type, GraphQLList):
item_type = type.of_type
if isinstance(value_ast, ast.ListValue):
return [
value_from_ast(item_ast, item_type, variables)
for item_ast in value_ast.values
]
else:
return [value_from_ast(value_ast, item_type, variables)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
if not isinstance(value_ast, ast.ObjectValue):
return None
field_asts = {}
for field_ast in value_ast.fields:
field_asts[field_ast.name.value] = field_ast
obj = {}
for field_name, field in fields.items():
if field_name not in field_asts:
if field.default_value is not None:
# We use out_name as the output name for the
# dict if exists
obj[field.out_name or field_name] = field.default_value
continue
field_ast = field_asts[field_name]
field_value_ast = field_ast.value
field_value = value_from_ast(field_value_ast, field.type, variables)
# We use out_name as the output name for the
# dict if exists
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_literal(value_ast) | [
"def",
"value_from_ast",
"(",
"value_ast",
",",
"type",
",",
"variables",
"=",
"None",
")",
":",
"# type: (Optional[Node], GraphQLType, Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]) -> Union[List, Dict, int, float, bool, str, None]",
"if",
"isinstance",
"(",
"t... | Given a type and a value AST node known to match this type, build a
runtime value. | [
"Given",
"a",
"type",
"and",
"a",
"value",
"AST",
"node",
"known",
"to",
"match",
"this",
"type",
"build",
"a",
"runtime",
"value",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/utils/value_from_ast.py#L17-L81 | train | 217,813 |
graphql-python/graphql-core | graphql/utils/ast_to_code.py | ast_to_code | def ast_to_code(ast, indent=0):
# type: (Any, int) -> str
"""
Converts an ast into a python code representation of the AST.
"""
code = []
def append(line):
# type: (str) -> None
code.append((" " * indent) + line)
if isinstance(ast, Node):
append("ast.{}(".format(ast.__class__.__name__))
indent += 1
for i, k in enumerate(ast._fields, 1):
v = getattr(ast, k)
append("{}={},".format(k, ast_to_code(v, indent)))
if ast.loc:
append("loc={}".format(ast_to_code(ast.loc, indent)))
indent -= 1
append(")")
elif isinstance(ast, Loc):
append("loc({}, {})".format(ast.start, ast.end))
elif isinstance(ast, list):
if ast:
append("[")
indent += 1
for i, it in enumerate(ast, 1):
is_last = i == len(ast)
append(ast_to_code(it, indent) + ("," if not is_last else ""))
indent -= 1
append("]")
else:
append("[]")
else:
append(repr(ast))
return "\n".join(code).strip() | python | def ast_to_code(ast, indent=0):
# type: (Any, int) -> str
"""
Converts an ast into a python code representation of the AST.
"""
code = []
def append(line):
# type: (str) -> None
code.append((" " * indent) + line)
if isinstance(ast, Node):
append("ast.{}(".format(ast.__class__.__name__))
indent += 1
for i, k in enumerate(ast._fields, 1):
v = getattr(ast, k)
append("{}={},".format(k, ast_to_code(v, indent)))
if ast.loc:
append("loc={}".format(ast_to_code(ast.loc, indent)))
indent -= 1
append(")")
elif isinstance(ast, Loc):
append("loc({}, {})".format(ast.start, ast.end))
elif isinstance(ast, list):
if ast:
append("[")
indent += 1
for i, it in enumerate(ast, 1):
is_last = i == len(ast)
append(ast_to_code(it, indent) + ("," if not is_last else ""))
indent -= 1
append("]")
else:
append("[]")
else:
append(repr(ast))
return "\n".join(code).strip() | [
"def",
"ast_to_code",
"(",
"ast",
",",
"indent",
"=",
"0",
")",
":",
"# type: (Any, int) -> str",
"code",
"=",
"[",
"]",
"def",
"append",
"(",
"line",
")",
":",
"# type: (str) -> None",
"code",
".",
"append",
"(",
"(",
"\" \"",
"*",
"indent",
")",
"+"... | Converts an ast into a python code representation of the AST. | [
"Converts",
"an",
"ast",
"into",
"a",
"python",
"code",
"representation",
"of",
"the",
"AST",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/utils/ast_to_code.py#L9-L52 | train | 217,814 |
graphql-python/graphql-core | scripts/casing.py | snake | def snake(s):
"""Convert from title or camelCase to snake_case."""
if len(s) < 2:
return s.lower()
out = s[0].lower()
for c in s[1:]:
if c.isupper():
out += "_"
c = c.lower()
out += c
return out | python | def snake(s):
"""Convert from title or camelCase to snake_case."""
if len(s) < 2:
return s.lower()
out = s[0].lower()
for c in s[1:]:
if c.isupper():
out += "_"
c = c.lower()
out += c
return out | [
"def",
"snake",
"(",
"s",
")",
":",
"if",
"len",
"(",
"s",
")",
"<",
"2",
":",
"return",
"s",
".",
"lower",
"(",
")",
"out",
"=",
"s",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"for",
"c",
"in",
"s",
"[",
"1",
":",
"]",
":",
"if",
"c",
"... | Convert from title or camelCase to snake_case. | [
"Convert",
"from",
"title",
"or",
"camelCase",
"to",
"snake_case",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/scripts/casing.py#L19-L29 | train | 217,815 |
graphql-python/graphql-core | graphql/backend/quiver_cloud.py | GraphQLQuiverCloudBackend.make_post_request | def make_post_request(self, url, auth, json_payload):
"""This function executes the request with the provided
json payload and return the json response"""
response = requests.post(url, auth=auth, json=json_payload)
return response.json() | python | def make_post_request(self, url, auth, json_payload):
"""This function executes the request with the provided
json payload and return the json response"""
response = requests.post(url, auth=auth, json=json_payload)
return response.json() | [
"def",
"make_post_request",
"(",
"self",
",",
"url",
",",
"auth",
",",
"json_payload",
")",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"auth",
"=",
"auth",
",",
"json",
"=",
"json_payload",
")",
"return",
"response",
".",
"json",
"... | This function executes the request with the provided
json payload and return the json response | [
"This",
"function",
"executes",
"the",
"request",
"with",
"the",
"provided",
"json",
"payload",
"and",
"return",
"the",
"json",
"response"
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/backend/quiver_cloud.py#L64-L68 | train | 217,816 |
graphql-python/graphql-core | graphql/utils/get_field_def.py | get_field_def | def get_field_def(
schema, # type: GraphQLSchema
parent_type, # type: Union[GraphQLInterfaceType, GraphQLObjectType]
field_ast, # type: Field
):
# type: (...) -> Optional[GraphQLField]
"""Not exactly the same as the executor's definition of get_field_def, in this
statically evaluated environment we do not always have an Object type,
and need to handle Interface and Union types."""
name = field_ast.name.value
if name == "__schema" and schema.get_query_type() == parent_type:
return SchemaMetaFieldDef
elif name == "__type" and schema.get_query_type() == parent_type:
return TypeMetaFieldDef
elif name == "__typename" and isinstance(
parent_type, (GraphQLObjectType, GraphQLInterfaceType, GraphQLUnionType)
):
return TypeNameMetaFieldDef
elif isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):
return parent_type.fields.get(name) | python | def get_field_def(
schema, # type: GraphQLSchema
parent_type, # type: Union[GraphQLInterfaceType, GraphQLObjectType]
field_ast, # type: Field
):
# type: (...) -> Optional[GraphQLField]
"""Not exactly the same as the executor's definition of get_field_def, in this
statically evaluated environment we do not always have an Object type,
and need to handle Interface and Union types."""
name = field_ast.name.value
if name == "__schema" and schema.get_query_type() == parent_type:
return SchemaMetaFieldDef
elif name == "__type" and schema.get_query_type() == parent_type:
return TypeMetaFieldDef
elif name == "__typename" and isinstance(
parent_type, (GraphQLObjectType, GraphQLInterfaceType, GraphQLUnionType)
):
return TypeNameMetaFieldDef
elif isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):
return parent_type.fields.get(name) | [
"def",
"get_field_def",
"(",
"schema",
",",
"# type: GraphQLSchema",
"parent_type",
",",
"# type: Union[GraphQLInterfaceType, GraphQLObjectType]",
"field_ast",
",",
"# type: Field",
")",
":",
"# type: (...) -> Optional[GraphQLField]",
"name",
"=",
"field_ast",
".",
"name",
".... | Not exactly the same as the executor's definition of get_field_def, in this
statically evaluated environment we do not always have an Object type,
and need to handle Interface and Union types. | [
"Not",
"exactly",
"the",
"same",
"as",
"the",
"executor",
"s",
"definition",
"of",
"get_field_def",
"in",
"this",
"statically",
"evaluated",
"environment",
"we",
"do",
"not",
"always",
"have",
"an",
"Object",
"type",
"and",
"need",
"to",
"handle",
"Interface",... | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/utils/get_field_def.py#L16-L38 | train | 217,817 |
graphql-python/graphql-core | graphql/validation/rules/overlapping_fields_can_be_merged.py | _find_conflicts_within_selection_set | def _find_conflicts_within_selection_set(
context, # type: ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
parent_type, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None]
selection_set, # type: SelectionSet
):
# type: (...) -> List[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Find all conflicts found "within" a selection set, including those found via spreading in fragments.
Called when visiting each SelectionSet in the GraphQL Document.
"""
conflicts = [] # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
field_map, fragment_names = _get_fields_and_fragments_names(
context, cached_fields_and_fragment_names, parent_type, selection_set
)
# (A) Find all conflicts "within" the fields of this selection set.
# Note: this is the *only place* `collect_conflicts_within` is called.
_collect_conflicts_within(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
field_map,
)
# (B) Then collect conflicts between these fields and those represented by
# each spread fragment name found.
for i, fragment_name in enumerate(fragment_names):
_collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
False,
field_map,
fragment_name,
)
# (C) Then compare this fragment with all other fragments found in this
# selection set to collect conflicts within fragments spread together.
# This compares each item in the list of fragment names to every other item
# in that same list (except for itself).
for other_fragment_name in fragment_names[i + 1 :]:
_collect_conflicts_between_fragments(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
False,
fragment_name,
other_fragment_name,
)
return conflicts | python | def _find_conflicts_within_selection_set(
context, # type: ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
parent_type, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None]
selection_set, # type: SelectionSet
):
# type: (...) -> List[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Find all conflicts found "within" a selection set, including those found via spreading in fragments.
Called when visiting each SelectionSet in the GraphQL Document.
"""
conflicts = [] # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
field_map, fragment_names = _get_fields_and_fragments_names(
context, cached_fields_and_fragment_names, parent_type, selection_set
)
# (A) Find all conflicts "within" the fields of this selection set.
# Note: this is the *only place* `collect_conflicts_within` is called.
_collect_conflicts_within(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
field_map,
)
# (B) Then collect conflicts between these fields and those represented by
# each spread fragment name found.
for i, fragment_name in enumerate(fragment_names):
_collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
False,
field_map,
fragment_name,
)
# (C) Then compare this fragment with all other fragments found in this
# selection set to collect conflicts within fragments spread together.
# This compares each item in the list of fragment names to every other item
# in that same list (except for itself).
for other_fragment_name in fragment_names[i + 1 :]:
_collect_conflicts_between_fragments(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
False,
fragment_name,
other_fragment_name,
)
return conflicts | [
"def",
"_find_conflicts_within_selection_set",
"(",
"context",
",",
"# type: ValidationContext",
"cached_fields_and_fragment_names",
",",
"# type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]",
"compared_fra... | Find all conflicts found "within" a selection set, including those found via spreading in fragments.
Called when visiting each SelectionSet in the GraphQL Document. | [
"Find",
"all",
"conflicts",
"found",
"within",
"a",
"selection",
"set",
"including",
"those",
"found",
"via",
"spreading",
"in",
"fragments",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/validation/rules/overlapping_fields_can_be_merged.py#L167-L222 | train | 217,818 |
graphql-python/graphql-core | graphql/validation/rules/overlapping_fields_can_be_merged.py | _find_conflicts_between_sub_selection_sets | def _find_conflicts_between_sub_selection_sets(
context, # type: ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
are_mutually_exclusive, # type: bool
parent_type1, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None]
selection_set1, # type: SelectionSet
parent_type2, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None]
selection_set2, # type: SelectionSet
):
# type: (...) -> List[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Find all conflicts found between two selection sets.
Includes those found via spreading in fragments. Called when determining if conflicts exist
between the sub-fields of two overlapping fields.
"""
conflicts = [] # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
field_map1, fragment_names1 = _get_fields_and_fragments_names(
context, cached_fields_and_fragment_names, parent_type1, selection_set1
)
field_map2, fragment_names2 = _get_fields_and_fragments_names(
context, cached_fields_and_fragment_names, parent_type2, selection_set2
)
# (H) First, collect all conflicts between these two collections of field.
_collect_conflicts_between(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
field_map1,
field_map2,
)
# (I) Then collect conflicts between the first collection of fields and
# those referenced by each fragment name associated with the second.
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
field_map1,
fragment_name2,
)
# (I) Then collect conflicts between the second collection of fields and
# those referenced by each fragment name associated with the first.
for fragment_name1 in fragment_names1:
_collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
field_map2,
fragment_name1,
)
# (J) Also collect conflicts between any fragment names by the first and
# fragment names by the second. This compares each item in the first set of
# names to each item in the second set of names.
for fragment_name1 in fragment_names1:
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fragments(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
fragment_name1,
fragment_name2,
)
return conflicts | python | def _find_conflicts_between_sub_selection_sets(
context, # type: ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
are_mutually_exclusive, # type: bool
parent_type1, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None]
selection_set1, # type: SelectionSet
parent_type2, # type: Union[GraphQLInterfaceType, GraphQLObjectType, None]
selection_set2, # type: SelectionSet
):
# type: (...) -> List[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Find all conflicts found between two selection sets.
Includes those found via spreading in fragments. Called when determining if conflicts exist
between the sub-fields of two overlapping fields.
"""
conflicts = [] # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
field_map1, fragment_names1 = _get_fields_and_fragments_names(
context, cached_fields_and_fragment_names, parent_type1, selection_set1
)
field_map2, fragment_names2 = _get_fields_and_fragments_names(
context, cached_fields_and_fragment_names, parent_type2, selection_set2
)
# (H) First, collect all conflicts between these two collections of field.
_collect_conflicts_between(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
field_map1,
field_map2,
)
# (I) Then collect conflicts between the first collection of fields and
# those referenced by each fragment name associated with the second.
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
field_map1,
fragment_name2,
)
# (I) Then collect conflicts between the second collection of fields and
# those referenced by each fragment name associated with the first.
for fragment_name1 in fragment_names1:
_collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
field_map2,
fragment_name1,
)
# (J) Also collect conflicts between any fragment names by the first and
# fragment names by the second. This compares each item in the first set of
# names to each item in the second set of names.
for fragment_name1 in fragment_names1:
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fragments(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
fragment_name1,
fragment_name2,
)
return conflicts | [
"def",
"_find_conflicts_between_sub_selection_sets",
"(",
"context",
",",
"# type: ValidationContext",
"cached_fields_and_fragment_names",
",",
"# type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]",
"compar... | Find all conflicts found between two selection sets.
Includes those found via spreading in fragments. Called when determining if conflicts exist
between the sub-fields of two overlapping fields. | [
"Find",
"all",
"conflicts",
"found",
"between",
"two",
"selection",
"sets",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/validation/rules/overlapping_fields_can_be_merged.py#L348-L426 | train | 217,819 |
graphql-python/graphql-core | graphql/validation/rules/overlapping_fields_can_be_merged.py | _find_conflict | def _find_conflict(
context, # type: ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
parent_fields_are_mutually_exclusive, # type: bool
response_name, # type: str
field1, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]
field2, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]
):
# type: (...) -> Optional[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Determines if there is a conflict between two particular fields."""
parent_type1, ast1, def1 = field1
parent_type2, ast2, def2 = field2
# If it is known that two fields could not possibly apply at the same
# time, due to the parent types, then it is safe to permit them to diverge
# in aliased field or arguments used as they will not present any ambiguity
# by differing.
# It is known that two parent types could never overlap if they are
# different Object types. Interface or Union types might overlap - if not
# in the current state of the schema, then perhaps in some future version,
# thus may not safely diverge.
are_mutually_exclusive = parent_fields_are_mutually_exclusive or (
parent_type1 != parent_type2
and isinstance(parent_type1, GraphQLObjectType)
and isinstance(parent_type2, GraphQLObjectType)
)
# The return type for each field.
type1 = def1 and def1.type
type2 = def2 and def2.type
if not are_mutually_exclusive:
# Two aliases must refer to the same field.
name1 = ast1.name.value
name2 = ast2.name.value
if name1 != name2:
return (
(response_name, "{} and {} are different fields".format(name1, name2)),
[ast1],
[ast2],
)
# Two field calls must have the same arguments.
if not _same_arguments(ast1.arguments, ast2.arguments):
return ((response_name, "they have differing arguments"), [ast1], [ast2])
if type1 and type2 and do_types_conflict(type1, type2):
return (
(
response_name,
"they return conflicting types {} and {}".format(type1, type2),
),
[ast1],
[ast2],
)
# Collect and compare sub-fields. Use the same "visited fragment names" list
# for both collections so fields in a fragment reference are never
# compared to themselves.
selection_set1 = ast1.selection_set
selection_set2 = ast2.selection_set
if selection_set1 and selection_set2:
conflicts = _find_conflicts_between_sub_selection_sets( # type: ignore
context,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
get_named_type(type1), # type: ignore
selection_set1,
get_named_type(type2), # type: ignore
selection_set2,
)
return _subfield_conflicts(conflicts, response_name, ast1, ast2)
return None | python | def _find_conflict(
context, # type: ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
compared_fragments, # type: PairSet
parent_fields_are_mutually_exclusive, # type: bool
response_name, # type: str
field1, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]
field2, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]
):
# type: (...) -> Optional[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Determines if there is a conflict between two particular fields."""
parent_type1, ast1, def1 = field1
parent_type2, ast2, def2 = field2
# If it is known that two fields could not possibly apply at the same
# time, due to the parent types, then it is safe to permit them to diverge
# in aliased field or arguments used as they will not present any ambiguity
# by differing.
# It is known that two parent types could never overlap if they are
# different Object types. Interface or Union types might overlap - if not
# in the current state of the schema, then perhaps in some future version,
# thus may not safely diverge.
are_mutually_exclusive = parent_fields_are_mutually_exclusive or (
parent_type1 != parent_type2
and isinstance(parent_type1, GraphQLObjectType)
and isinstance(parent_type2, GraphQLObjectType)
)
# The return type for each field.
type1 = def1 and def1.type
type2 = def2 and def2.type
if not are_mutually_exclusive:
# Two aliases must refer to the same field.
name1 = ast1.name.value
name2 = ast2.name.value
if name1 != name2:
return (
(response_name, "{} and {} are different fields".format(name1, name2)),
[ast1],
[ast2],
)
# Two field calls must have the same arguments.
if not _same_arguments(ast1.arguments, ast2.arguments):
return ((response_name, "they have differing arguments"), [ast1], [ast2])
if type1 and type2 and do_types_conflict(type1, type2):
return (
(
response_name,
"they return conflicting types {} and {}".format(type1, type2),
),
[ast1],
[ast2],
)
# Collect and compare sub-fields. Use the same "visited fragment names" list
# for both collections so fields in a fragment reference are never
# compared to themselves.
selection_set1 = ast1.selection_set
selection_set2 = ast2.selection_set
if selection_set1 and selection_set2:
conflicts = _find_conflicts_between_sub_selection_sets( # type: ignore
context,
cached_fields_and_fragment_names,
compared_fragments,
are_mutually_exclusive,
get_named_type(type1), # type: ignore
selection_set1,
get_named_type(type2), # type: ignore
selection_set2,
)
return _subfield_conflicts(conflicts, response_name, ast1, ast2)
return None | [
"def",
"_find_conflict",
"(",
"context",
",",
"# type: ValidationContext",
"cached_fields_and_fragment_names",
",",
"# type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]",
"compared_fragments",
",",
"# t... | Determines if there is a conflict between two particular fields. | [
"Determines",
"if",
"there",
"is",
"a",
"conflict",
"between",
"two",
"particular",
"fields",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/validation/rules/overlapping_fields_can_be_merged.py#L504-L583 | train | 217,820 |
graphql-python/graphql-core | graphql/validation/rules/overlapping_fields_can_be_merged.py | _get_referenced_fields_and_fragment_names | def _get_referenced_fields_and_fragment_names(
context, # ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
fragment, # type: InlineFragment
):
# type: (...) -> Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]
"""Given a reference to a fragment, return the represented collection of fields as well as a list of
nested fragment names referenced via fragment spreads."""
# Short-circuit building a type from the AST if possible.
cached = cached_fields_and_fragment_names.get(fragment.selection_set)
if cached:
return cached
fragment_type = type_from_ast( # type: ignore
context.get_schema(), fragment.type_condition
)
return _get_fields_and_fragments_names( # type: ignore
context, cached_fields_and_fragment_names, fragment_type, fragment.selection_set
) | python | def _get_referenced_fields_and_fragment_names(
context, # ValidationContext
cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
fragment, # type: InlineFragment
):
# type: (...) -> Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]
"""Given a reference to a fragment, return the represented collection of fields as well as a list of
nested fragment names referenced via fragment spreads."""
# Short-circuit building a type from the AST if possible.
cached = cached_fields_and_fragment_names.get(fragment.selection_set)
if cached:
return cached
fragment_type = type_from_ast( # type: ignore
context.get_schema(), fragment.type_condition
)
return _get_fields_and_fragments_names( # type: ignore
context, cached_fields_and_fragment_names, fragment_type, fragment.selection_set
) | [
"def",
"_get_referenced_fields_and_fragment_names",
"(",
"context",
",",
"# ValidationContext",
"cached_fields_and_fragment_names",
",",
"# type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]",
"fragment",
... | Given a reference to a fragment, return the represented collection of fields as well as a list of
nested fragment names referenced via fragment spreads. | [
"Given",
"a",
"reference",
"to",
"a",
"fragment",
"return",
"the",
"represented",
"collection",
"of",
"fields",
"as",
"well",
"as",
"a",
"list",
"of",
"nested",
"fragment",
"names",
"referenced",
"via",
"fragment",
"spreads",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/validation/rules/overlapping_fields_can_be_merged.py#L609-L630 | train | 217,821 |
graphql-python/graphql-core | graphql/validation/rules/overlapping_fields_can_be_merged.py | _subfield_conflicts | def _subfield_conflicts(
conflicts, # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
response_name, # type: str
ast1, # type: Node
ast2, # type: Node
):
# type: (...) -> Optional[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Given a series of Conflicts which occurred between two sub-fields, generate a single Conflict."""
if conflicts:
return ( # type: ignore
(response_name, [conflict[0] for conflict in conflicts]),
tuple(itertools.chain([ast1], *[conflict[1] for conflict in conflicts])),
tuple(itertools.chain([ast2], *[conflict[2] for conflict in conflicts])),
)
return None | python | def _subfield_conflicts(
conflicts, # type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]
response_name, # type: str
ast1, # type: Node
ast2, # type: Node
):
# type: (...) -> Optional[Tuple[Tuple[str, str], List[Node], List[Node]]]
"""Given a series of Conflicts which occurred between two sub-fields, generate a single Conflict."""
if conflicts:
return ( # type: ignore
(response_name, [conflict[0] for conflict in conflicts]),
tuple(itertools.chain([ast1], *[conflict[1] for conflict in conflicts])),
tuple(itertools.chain([ast2], *[conflict[2] for conflict in conflicts])),
)
return None | [
"def",
"_subfield_conflicts",
"(",
"conflicts",
",",
"# type: List[Tuple[Tuple[str, str], List[Node], List[Node]]]",
"response_name",
",",
"# type: str",
"ast1",
",",
"# type: Node",
"ast2",
",",
"# type: Node",
")",
":",
"# type: (...) -> Optional[Tuple[Tuple[str, str], List[Node]... | Given a series of Conflicts which occurred between two sub-fields, generate a single Conflict. | [
"Given",
"a",
"series",
"of",
"Conflicts",
"which",
"occurred",
"between",
"two",
"sub",
"-",
"fields",
"generate",
"a",
"single",
"Conflict",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/validation/rules/overlapping_fields_can_be_merged.py#L677-L691 | train | 217,822 |
graphql-python/graphql-core | graphql/execution/utils.py | collect_fields | def collect_fields(
ctx, # type: ExecutionContext
runtime_type, # type: GraphQLObjectType
selection_set, # type: SelectionSet
fields, # type: DefaultOrderedDict
prev_fragment_names, # type: Set[str]
):
# type: (...) -> DefaultOrderedDict
"""
Given a selectionSet, adds all of the fields in that selection to
the passed in map of fields, and returns it at the end.
collect_fields requires the "runtime type" of an object. For a field which
returns and Interface or Union type, the "runtime type" will be the actual
Object type returned by that field.
"""
for selection in selection_set.selections:
directives = selection.directives
if isinstance(selection, ast.Field):
if not should_include_node(ctx, directives):
continue
name = get_field_entry_key(selection)
fields[name].append(selection)
elif isinstance(selection, ast.InlineFragment):
if not should_include_node(
ctx, directives
) or not does_fragment_condition_match(ctx, selection, runtime_type):
continue
collect_fields(
ctx, runtime_type, selection.selection_set, fields, prev_fragment_names
)
elif isinstance(selection, ast.FragmentSpread):
frag_name = selection.name.value
if frag_name in prev_fragment_names or not should_include_node(
ctx, directives
):
continue
prev_fragment_names.add(frag_name)
fragment = ctx.fragments[frag_name]
frag_directives = fragment.directives
if (
not fragment
or not should_include_node(ctx, frag_directives)
or not does_fragment_condition_match(ctx, fragment, runtime_type)
):
continue
collect_fields(
ctx, runtime_type, fragment.selection_set, fields, prev_fragment_names
)
return fields | python | def collect_fields(
ctx, # type: ExecutionContext
runtime_type, # type: GraphQLObjectType
selection_set, # type: SelectionSet
fields, # type: DefaultOrderedDict
prev_fragment_names, # type: Set[str]
):
# type: (...) -> DefaultOrderedDict
"""
Given a selectionSet, adds all of the fields in that selection to
the passed in map of fields, and returns it at the end.
collect_fields requires the "runtime type" of an object. For a field which
returns and Interface or Union type, the "runtime type" will be the actual
Object type returned by that field.
"""
for selection in selection_set.selections:
directives = selection.directives
if isinstance(selection, ast.Field):
if not should_include_node(ctx, directives):
continue
name = get_field_entry_key(selection)
fields[name].append(selection)
elif isinstance(selection, ast.InlineFragment):
if not should_include_node(
ctx, directives
) or not does_fragment_condition_match(ctx, selection, runtime_type):
continue
collect_fields(
ctx, runtime_type, selection.selection_set, fields, prev_fragment_names
)
elif isinstance(selection, ast.FragmentSpread):
frag_name = selection.name.value
if frag_name in prev_fragment_names or not should_include_node(
ctx, directives
):
continue
prev_fragment_names.add(frag_name)
fragment = ctx.fragments[frag_name]
frag_directives = fragment.directives
if (
not fragment
or not should_include_node(ctx, frag_directives)
or not does_fragment_condition_match(ctx, fragment, runtime_type)
):
continue
collect_fields(
ctx, runtime_type, fragment.selection_set, fields, prev_fragment_names
)
return fields | [
"def",
"collect_fields",
"(",
"ctx",
",",
"# type: ExecutionContext",
"runtime_type",
",",
"# type: GraphQLObjectType",
"selection_set",
",",
"# type: SelectionSet",
"fields",
",",
"# type: DefaultOrderedDict",
"prev_fragment_names",
",",
"# type: Set[str]",
")",
":",
"# type... | Given a selectionSet, adds all of the fields in that selection to
the passed in map of fields, and returns it at the end.
collect_fields requires the "runtime type" of an object. For a field which
returns and Interface or Union type, the "runtime type" will be the actual
Object type returned by that field. | [
"Given",
"a",
"selectionSet",
"adds",
"all",
"of",
"the",
"fields",
"in",
"that",
"selection",
"to",
"the",
"passed",
"in",
"map",
"of",
"fields",
"and",
"returns",
"it",
"at",
"the",
"end",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/utils.py#L224-L282 | train | 217,823 |
graphql-python/graphql-core | graphql/execution/utils.py | should_include_node | def should_include_node(ctx, directives):
# type: (ExecutionContext, Optional[List[Directive]]) -> bool
"""Determines if a field should be included based on the @include and
@skip directives, where @skip has higher precidence than @include."""
# TODO: Refactor based on latest code
if directives:
skip_ast = None
for directive in directives:
if directive.name.value == GraphQLSkipDirective.name:
skip_ast = directive
break
if skip_ast:
args = get_argument_values(
GraphQLSkipDirective.args, skip_ast.arguments, ctx.variable_values
)
if args.get("if") is True:
return False
include_ast = None
for directive in directives:
if directive.name.value == GraphQLIncludeDirective.name:
include_ast = directive
break
if include_ast:
args = get_argument_values(
GraphQLIncludeDirective.args, include_ast.arguments, ctx.variable_values
)
if args.get("if") is False:
return False
return True | python | def should_include_node(ctx, directives):
# type: (ExecutionContext, Optional[List[Directive]]) -> bool
"""Determines if a field should be included based on the @include and
@skip directives, where @skip has higher precidence than @include."""
# TODO: Refactor based on latest code
if directives:
skip_ast = None
for directive in directives:
if directive.name.value == GraphQLSkipDirective.name:
skip_ast = directive
break
if skip_ast:
args = get_argument_values(
GraphQLSkipDirective.args, skip_ast.arguments, ctx.variable_values
)
if args.get("if") is True:
return False
include_ast = None
for directive in directives:
if directive.name.value == GraphQLIncludeDirective.name:
include_ast = directive
break
if include_ast:
args = get_argument_values(
GraphQLIncludeDirective.args, include_ast.arguments, ctx.variable_values
)
if args.get("if") is False:
return False
return True | [
"def",
"should_include_node",
"(",
"ctx",
",",
"directives",
")",
":",
"# type: (ExecutionContext, Optional[List[Directive]]) -> bool",
"# TODO: Refactor based on latest code",
"if",
"directives",
":",
"skip_ast",
"=",
"None",
"for",
"directive",
"in",
"directives",
":",
"i... | Determines if a field should be included based on the @include and
@skip directives, where @skip has higher precidence than @include. | [
"Determines",
"if",
"a",
"field",
"should",
"be",
"included",
"based",
"on",
"the"
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/utils.py#L285-L320 | train | 217,824 |
graphql-python/graphql-core | graphql/execution/utils.py | default_resolve_fn | def default_resolve_fn(source, info, **args):
# type: (Any, ResolveInfo, **Any) -> Optional[Any]
"""If a resolve function is not given, then a default resolve behavior is used which takes the property of the source object
of the same name as the field and returns it as the result, or if it's a function, returns the result of calling that function."""
name = info.field_name
if isinstance(source, dict):
property = source.get(name)
else:
property = getattr(source, name, None)
if callable(property):
return property()
return property | python | def default_resolve_fn(source, info, **args):
# type: (Any, ResolveInfo, **Any) -> Optional[Any]
"""If a resolve function is not given, then a default resolve behavior is used which takes the property of the source object
of the same name as the field and returns it as the result, or if it's a function, returns the result of calling that function."""
name = info.field_name
if isinstance(source, dict):
property = source.get(name)
else:
property = getattr(source, name, None)
if callable(property):
return property()
return property | [
"def",
"default_resolve_fn",
"(",
"source",
",",
"info",
",",
"*",
"*",
"args",
")",
":",
"# type: (Any, ResolveInfo, **Any) -> Optional[Any]",
"name",
"=",
"info",
".",
"field_name",
"if",
"isinstance",
"(",
"source",
",",
"dict",
")",
":",
"property",
"=",
"... | If a resolve function is not given, then a default resolve behavior is used which takes the property of the source object
of the same name as the field and returns it as the result, or if it's a function, returns the result of calling that function. | [
"If",
"a",
"resolve",
"function",
"is",
"not",
"given",
"then",
"a",
"default",
"resolve",
"behavior",
"is",
"used",
"which",
"takes",
"the",
"property",
"of",
"the",
"source",
"object",
"of",
"the",
"same",
"name",
"as",
"the",
"field",
"and",
"returns",
... | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/utils.py#L351-L362 | train | 217,825 |
graphql-python/graphql-core | graphql/execution/values.py | get_variable_values | def get_variable_values(
schema, # type: GraphQLSchema
definition_asts, # type: List[VariableDefinition]
inputs, # type: Any
):
# type: (...) -> Dict[str, Any]
"""Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input.
If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown."""
if inputs is None:
inputs = {}
values = {}
for def_ast in definition_asts:
var_name = def_ast.variable.name.value
var_type = type_from_ast(schema, def_ast.type)
value = inputs.get(var_name)
if not is_input_type(var_type):
raise GraphQLError(
'Variable "${var_name}" expected value of type "{var_type}" which cannot be used as an input type.'.format(
var_name=var_name, var_type=print_ast(def_ast.type)
),
[def_ast],
)
elif value is None:
if def_ast.default_value is not None:
values[var_name] = value_from_ast(
def_ast.default_value, var_type
) # type: ignore
if isinstance(var_type, GraphQLNonNull):
raise GraphQLError(
'Variable "${var_name}" of required type "{var_type}" was not provided.'.format(
var_name=var_name, var_type=var_type
),
[def_ast],
)
else:
errors = is_valid_value(value, var_type)
if errors:
message = u"\n" + u"\n".join(errors)
raise GraphQLError(
'Variable "${}" got invalid value {}.{}'.format(
var_name, json.dumps(value, sort_keys=True), message
),
[def_ast],
)
coerced_value = coerce_value(var_type, value)
if coerced_value is None:
raise Exception("Should have reported error.")
values[var_name] = coerced_value
return values | python | def get_variable_values(
schema, # type: GraphQLSchema
definition_asts, # type: List[VariableDefinition]
inputs, # type: Any
):
# type: (...) -> Dict[str, Any]
"""Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input.
If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown."""
if inputs is None:
inputs = {}
values = {}
for def_ast in definition_asts:
var_name = def_ast.variable.name.value
var_type = type_from_ast(schema, def_ast.type)
value = inputs.get(var_name)
if not is_input_type(var_type):
raise GraphQLError(
'Variable "${var_name}" expected value of type "{var_type}" which cannot be used as an input type.'.format(
var_name=var_name, var_type=print_ast(def_ast.type)
),
[def_ast],
)
elif value is None:
if def_ast.default_value is not None:
values[var_name] = value_from_ast(
def_ast.default_value, var_type
) # type: ignore
if isinstance(var_type, GraphQLNonNull):
raise GraphQLError(
'Variable "${var_name}" of required type "{var_type}" was not provided.'.format(
var_name=var_name, var_type=var_type
),
[def_ast],
)
else:
errors = is_valid_value(value, var_type)
if errors:
message = u"\n" + u"\n".join(errors)
raise GraphQLError(
'Variable "${}" got invalid value {}.{}'.format(
var_name, json.dumps(value, sort_keys=True), message
),
[def_ast],
)
coerced_value = coerce_value(var_type, value)
if coerced_value is None:
raise Exception("Should have reported error.")
values[var_name] = coerced_value
return values | [
"def",
"get_variable_values",
"(",
"schema",
",",
"# type: GraphQLSchema",
"definition_asts",
",",
"# type: List[VariableDefinition]",
"inputs",
",",
"# type: Any",
")",
":",
"# type: (...) -> Dict[str, Any]",
"if",
"inputs",
"is",
"None",
":",
"inputs",
"=",
"{",
"}",
... | Prepares an object map of variables of the correct type based on the provided variable definitions and arbitrary input.
If the input cannot be parsed to match the variable definitions, a GraphQLError will be thrown. | [
"Prepares",
"an",
"object",
"map",
"of",
"variables",
"of",
"the",
"correct",
"type",
"based",
"on",
"the",
"provided",
"variable",
"definitions",
"and",
"arbitrary",
"input",
".",
"If",
"the",
"input",
"cannot",
"be",
"parsed",
"to",
"match",
"the",
"variab... | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/values.py#L34-L86 | train | 217,826 |
graphql-python/graphql-core | graphql/execution/values.py | get_argument_values | def get_argument_values(
arg_defs, # type: Union[Dict[str, GraphQLArgument], Dict]
arg_asts, # type: Optional[List[Argument]]
variables=None, # type: Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]
):
# type: (...) -> Dict[str, Any]
"""Prepares an object map of argument values given a list of argument
definitions and list of argument AST nodes."""
if not arg_defs:
return {}
if arg_asts:
arg_ast_map = {
arg.name.value: arg for arg in arg_asts
} # type: Dict[str, Argument]
else:
arg_ast_map = {}
result = {}
for name, arg_def in arg_defs.items():
arg_type = arg_def.type
arg_ast = arg_ast_map.get(name)
if name not in arg_ast_map:
if arg_def.default_value is not None:
result[arg_def.out_name or name] = arg_def.default_value
continue
elif isinstance(arg_type, GraphQLNonNull):
raise GraphQLError(
'Argument "{name}" of required type {arg_type}" was not provided.'.format(
name=name, arg_type=arg_type
),
arg_asts,
)
elif isinstance(arg_ast.value, ast.Variable): # type: ignore
variable_name = arg_ast.value.name.value # type: ignore
if variables and variable_name in variables:
result[arg_def.out_name or name] = variables[variable_name]
elif arg_def.default_value is not None:
result[arg_def.out_name or name] = arg_def.default_value
elif isinstance(arg_type, GraphQLNonNull):
raise GraphQLError(
'Argument "{name}" of required type {arg_type}" provided the variable "${variable_name}" which was not provided'.format(
name=name, arg_type=arg_type, variable_name=variable_name
),
arg_asts,
)
continue
else:
value = value_from_ast(arg_ast.value, arg_type, variables) # type: ignore
if value is None:
if arg_def.default_value is not None:
value = arg_def.default_value
result[arg_def.out_name or name] = value
else:
# We use out_name as the output name for the
# dict if exists
result[arg_def.out_name or name] = value
return result | python | def get_argument_values(
arg_defs, # type: Union[Dict[str, GraphQLArgument], Dict]
arg_asts, # type: Optional[List[Argument]]
variables=None, # type: Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]
):
# type: (...) -> Dict[str, Any]
"""Prepares an object map of argument values given a list of argument
definitions and list of argument AST nodes."""
if not arg_defs:
return {}
if arg_asts:
arg_ast_map = {
arg.name.value: arg for arg in arg_asts
} # type: Dict[str, Argument]
else:
arg_ast_map = {}
result = {}
for name, arg_def in arg_defs.items():
arg_type = arg_def.type
arg_ast = arg_ast_map.get(name)
if name not in arg_ast_map:
if arg_def.default_value is not None:
result[arg_def.out_name or name] = arg_def.default_value
continue
elif isinstance(arg_type, GraphQLNonNull):
raise GraphQLError(
'Argument "{name}" of required type {arg_type}" was not provided.'.format(
name=name, arg_type=arg_type
),
arg_asts,
)
elif isinstance(arg_ast.value, ast.Variable): # type: ignore
variable_name = arg_ast.value.name.value # type: ignore
if variables and variable_name in variables:
result[arg_def.out_name or name] = variables[variable_name]
elif arg_def.default_value is not None:
result[arg_def.out_name or name] = arg_def.default_value
elif isinstance(arg_type, GraphQLNonNull):
raise GraphQLError(
'Argument "{name}" of required type {arg_type}" provided the variable "${variable_name}" which was not provided'.format(
name=name, arg_type=arg_type, variable_name=variable_name
),
arg_asts,
)
continue
else:
value = value_from_ast(arg_ast.value, arg_type, variables) # type: ignore
if value is None:
if arg_def.default_value is not None:
value = arg_def.default_value
result[arg_def.out_name or name] = value
else:
# We use out_name as the output name for the
# dict if exists
result[arg_def.out_name or name] = value
return result | [
"def",
"get_argument_values",
"(",
"arg_defs",
",",
"# type: Union[Dict[str, GraphQLArgument], Dict]",
"arg_asts",
",",
"# type: Optional[List[Argument]]",
"variables",
"=",
"None",
",",
"# type: Optional[Dict[str, Union[List, Dict, int, float, bool, str, None]]]",
")",
":",
"# type:... | Prepares an object map of argument values given a list of argument
definitions and list of argument AST nodes. | [
"Prepares",
"an",
"object",
"map",
"of",
"argument",
"values",
"given",
"a",
"list",
"of",
"argument",
"definitions",
"and",
"list",
"of",
"argument",
"AST",
"nodes",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/values.py#L89-L148 | train | 217,827 |
graphql-python/graphql-core | graphql/execution/values.py | coerce_value | def coerce_value(type, value):
# type: (Any, Any) -> Union[List, Dict, int, float, bool, str, None]
"""Given a type and any value, return a runtime value coerced to match the type."""
if isinstance(type, GraphQLNonNull):
# Note: we're not checking that the result of coerceValue is
# non-null.
# We only call this function after calling isValidValue.
return coerce_value(type.of_type, value)
if value is None:
return None
if isinstance(type, GraphQLList):
item_type = type.of_type
if not isinstance(value, string_types) and isinstance(value, Iterable):
return [coerce_value(item_type, item) for item in value]
else:
return [coerce_value(item_type, value)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
obj = {}
for field_name, field in fields.items():
if field_name not in value:
if field.default_value is not None:
field_value = field.default_value
obj[field.out_name or field_name] = field_value
else:
field_value = coerce_value(field.type, value.get(field_name))
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_value(value) | python | def coerce_value(type, value):
# type: (Any, Any) -> Union[List, Dict, int, float, bool, str, None]
"""Given a type and any value, return a runtime value coerced to match the type."""
if isinstance(type, GraphQLNonNull):
# Note: we're not checking that the result of coerceValue is
# non-null.
# We only call this function after calling isValidValue.
return coerce_value(type.of_type, value)
if value is None:
return None
if isinstance(type, GraphQLList):
item_type = type.of_type
if not isinstance(value, string_types) and isinstance(value, Iterable):
return [coerce_value(item_type, item) for item in value]
else:
return [coerce_value(item_type, value)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
obj = {}
for field_name, field in fields.items():
if field_name not in value:
if field.default_value is not None:
field_value = field.default_value
obj[field.out_name or field_name] = field_value
else:
field_value = coerce_value(field.type, value.get(field_name))
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_value(value) | [
"def",
"coerce_value",
"(",
"type",
",",
"value",
")",
":",
"# type: (Any, Any) -> Union[List, Dict, int, float, bool, str, None]",
"if",
"isinstance",
"(",
"type",
",",
"GraphQLNonNull",
")",
":",
"# Note: we're not checking that the result of coerceValue is",
"# non-null.",
"... | Given a type and any value, return a runtime value coerced to match the type. | [
"Given",
"a",
"type",
"and",
"any",
"value",
"return",
"a",
"runtime",
"value",
"coerced",
"to",
"match",
"the",
"type",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/execution/values.py#L151-L186 | train | 217,828 |
graphql-python/graphql-core | graphql/language/parser.py | parse | def parse(source, **kwargs):
# type: (Union[Source, str], **Any) -> Document
"""Given a GraphQL source, parses it into a Document."""
options = {"no_location": False, "no_source": False}
options.update(kwargs)
if isinstance(source, string_types):
source_obj = Source(source) # type: Source
else:
source_obj = source # type: ignore
parser = Parser(source_obj, options)
return parse_document(parser) | python | def parse(source, **kwargs):
# type: (Union[Source, str], **Any) -> Document
"""Given a GraphQL source, parses it into a Document."""
options = {"no_location": False, "no_source": False}
options.update(kwargs)
if isinstance(source, string_types):
source_obj = Source(source) # type: Source
else:
source_obj = source # type: ignore
parser = Parser(source_obj, options)
return parse_document(parser) | [
"def",
"parse",
"(",
"source",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (Union[Source, str], **Any) -> Document",
"options",
"=",
"{",
"\"no_location\"",
":",
"False",
",",
"\"no_source\"",
":",
"False",
"}",
"options",
".",
"update",
"(",
"kwargs",
")",
"i... | Given a GraphQL source, parses it into a Document. | [
"Given",
"a",
"GraphQL",
"source",
"parses",
"it",
"into",
"a",
"Document",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L52-L64 | train | 217,829 |
graphql-python/graphql-core | graphql/language/parser.py | loc | def loc(parser, start):
# type: (Parser, int) -> Optional[Loc]
"""Returns a location object, used to identify the place in
the source that created a given parsed object."""
if parser.options["no_location"]:
return None
if parser.options["no_source"]:
return Loc(start, parser.prev_end)
return Loc(start, parser.prev_end, parser.source) | python | def loc(parser, start):
# type: (Parser, int) -> Optional[Loc]
"""Returns a location object, used to identify the place in
the source that created a given parsed object."""
if parser.options["no_location"]:
return None
if parser.options["no_source"]:
return Loc(start, parser.prev_end)
return Loc(start, parser.prev_end, parser.source) | [
"def",
"loc",
"(",
"parser",
",",
"start",
")",
":",
"# type: (Parser, int) -> Optional[Loc]",
"if",
"parser",
".",
"options",
"[",
"\"no_location\"",
"]",
":",
"return",
"None",
"if",
"parser",
".",
"options",
"[",
"\"no_source\"",
"]",
":",
"return",
"Loc",
... | Returns a location object, used to identify the place in
the source that created a given parsed object. | [
"Returns",
"a",
"location",
"object",
"used",
"to",
"identify",
"the",
"place",
"in",
"the",
"source",
"that",
"created",
"a",
"given",
"parsed",
"object",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L114-L124 | train | 217,830 |
graphql-python/graphql-core | graphql/language/parser.py | advance | def advance(parser):
# type: (Parser) -> None
"""Moves the internal parser object to the next lexed token."""
prev_end = parser.token.end
parser.prev_end = prev_end
parser.token = parser.lexer.next_token(prev_end) | python | def advance(parser):
# type: (Parser) -> None
"""Moves the internal parser object to the next lexed token."""
prev_end = parser.token.end
parser.prev_end = prev_end
parser.token = parser.lexer.next_token(prev_end) | [
"def",
"advance",
"(",
"parser",
")",
":",
"# type: (Parser) -> None",
"prev_end",
"=",
"parser",
".",
"token",
".",
"end",
"parser",
".",
"prev_end",
"=",
"prev_end",
"parser",
".",
"token",
"=",
"parser",
".",
"lexer",
".",
"next_token",
"(",
"prev_end",
... | Moves the internal parser object to the next lexed token. | [
"Moves",
"the",
"internal",
"parser",
"object",
"to",
"the",
"next",
"lexed",
"token",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L127-L132 | train | 217,831 |
graphql-python/graphql-core | graphql/language/parser.py | skip | def skip(parser, kind):
# type: (Parser, int) -> bool
"""If the next token is of the given kind, return true after advancing
the parser. Otherwise, do not change the parser state
and throw an error."""
match = parser.token.kind == kind
if match:
advance(parser)
return match | python | def skip(parser, kind):
# type: (Parser, int) -> bool
"""If the next token is of the given kind, return true after advancing
the parser. Otherwise, do not change the parser state
and throw an error."""
match = parser.token.kind == kind
if match:
advance(parser)
return match | [
"def",
"skip",
"(",
"parser",
",",
"kind",
")",
":",
"# type: (Parser, int) -> bool",
"match",
"=",
"parser",
".",
"token",
".",
"kind",
"==",
"kind",
"if",
"match",
":",
"advance",
"(",
"parser",
")",
"return",
"match"
] | If the next token is of the given kind, return true after advancing
the parser. Otherwise, do not change the parser state
and throw an error. | [
"If",
"the",
"next",
"token",
"is",
"of",
"the",
"given",
"kind",
"return",
"true",
"after",
"advancing",
"the",
"parser",
".",
"Otherwise",
"do",
"not",
"change",
"the",
"parser",
"state",
"and",
"throw",
"an",
"error",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L141-L150 | train | 217,832 |
graphql-python/graphql-core | graphql/language/parser.py | expect | def expect(parser, kind):
# type: (Parser, int) -> Token
"""If the next token is of the given kind, return that token after
advancing the parser. Otherwise, do not change the parser state and
return False."""
token = parser.token
if token.kind == kind:
advance(parser)
return token
raise GraphQLSyntaxError(
parser.source,
token.start,
u"Expected {}, found {}".format(
get_token_kind_desc(kind), get_token_desc(token)
),
) | python | def expect(parser, kind):
# type: (Parser, int) -> Token
"""If the next token is of the given kind, return that token after
advancing the parser. Otherwise, do not change the parser state and
return False."""
token = parser.token
if token.kind == kind:
advance(parser)
return token
raise GraphQLSyntaxError(
parser.source,
token.start,
u"Expected {}, found {}".format(
get_token_kind_desc(kind), get_token_desc(token)
),
) | [
"def",
"expect",
"(",
"parser",
",",
"kind",
")",
":",
"# type: (Parser, int) -> Token",
"token",
"=",
"parser",
".",
"token",
"if",
"token",
".",
"kind",
"==",
"kind",
":",
"advance",
"(",
"parser",
")",
"return",
"token",
"raise",
"GraphQLSyntaxError",
"("... | If the next token is of the given kind, return that token after
advancing the parser. Otherwise, do not change the parser state and
return False. | [
"If",
"the",
"next",
"token",
"is",
"of",
"the",
"given",
"kind",
"return",
"that",
"token",
"after",
"advancing",
"the",
"parser",
".",
"Otherwise",
"do",
"not",
"change",
"the",
"parser",
"state",
"and",
"return",
"False",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L153-L169 | train | 217,833 |
graphql-python/graphql-core | graphql/language/parser.py | expect_keyword | def expect_keyword(parser, value):
# type: (Parser, str) -> Token
"""If the next token is a keyword with the given value, return that
token after advancing the parser. Otherwise, do not change the parser
state and return False."""
token = parser.token
if token.kind == TokenKind.NAME and token.value == value:
advance(parser)
return token
raise GraphQLSyntaxError(
parser.source,
token.start,
u'Expected "{}", found {}'.format(value, get_token_desc(token)),
) | python | def expect_keyword(parser, value):
# type: (Parser, str) -> Token
"""If the next token is a keyword with the given value, return that
token after advancing the parser. Otherwise, do not change the parser
state and return False."""
token = parser.token
if token.kind == TokenKind.NAME and token.value == value:
advance(parser)
return token
raise GraphQLSyntaxError(
parser.source,
token.start,
u'Expected "{}", found {}'.format(value, get_token_desc(token)),
) | [
"def",
"expect_keyword",
"(",
"parser",
",",
"value",
")",
":",
"# type: (Parser, str) -> Token",
"token",
"=",
"parser",
".",
"token",
"if",
"token",
".",
"kind",
"==",
"TokenKind",
".",
"NAME",
"and",
"token",
".",
"value",
"==",
"value",
":",
"advance",
... | If the next token is a keyword with the given value, return that
token after advancing the parser. Otherwise, do not change the parser
state and return False. | [
"If",
"the",
"next",
"token",
"is",
"a",
"keyword",
"with",
"the",
"given",
"value",
"return",
"that",
"token",
"after",
"advancing",
"the",
"parser",
".",
"Otherwise",
"do",
"not",
"change",
"the",
"parser",
"state",
"and",
"return",
"False",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L172-L186 | train | 217,834 |
graphql-python/graphql-core | graphql/language/parser.py | unexpected | def unexpected(parser, at_token=None):
# type: (Parser, Optional[Any]) -> GraphQLSyntaxError
"""Helper function for creating an error when an unexpected lexed token
is encountered."""
token = at_token or parser.token
return GraphQLSyntaxError(
parser.source, token.start, u"Unexpected {}".format(get_token_desc(token))
) | python | def unexpected(parser, at_token=None):
# type: (Parser, Optional[Any]) -> GraphQLSyntaxError
"""Helper function for creating an error when an unexpected lexed token
is encountered."""
token = at_token or parser.token
return GraphQLSyntaxError(
parser.source, token.start, u"Unexpected {}".format(get_token_desc(token))
) | [
"def",
"unexpected",
"(",
"parser",
",",
"at_token",
"=",
"None",
")",
":",
"# type: (Parser, Optional[Any]) -> GraphQLSyntaxError",
"token",
"=",
"at_token",
"or",
"parser",
".",
"token",
"return",
"GraphQLSyntaxError",
"(",
"parser",
".",
"source",
",",
"token",
... | Helper function for creating an error when an unexpected lexed token
is encountered. | [
"Helper",
"function",
"for",
"creating",
"an",
"error",
"when",
"an",
"unexpected",
"lexed",
"token",
"is",
"encountered",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L189-L196 | train | 217,835 |
graphql-python/graphql-core | graphql/language/parser.py | any | def any(parser, open_kind, parse_fn, close_kind):
# type: (Parser, int, Callable, int) -> Any
"""Returns a possibly empty list of parse nodes, determined by
the parse_fn. This list begins with a lex token of openKind
and ends with a lex token of closeKind. Advances the parser
to the next lex token after the closing token."""
expect(parser, open_kind)
nodes = []
while not skip(parser, close_kind):
nodes.append(parse_fn(parser))
return nodes | python | def any(parser, open_kind, parse_fn, close_kind):
# type: (Parser, int, Callable, int) -> Any
"""Returns a possibly empty list of parse nodes, determined by
the parse_fn. This list begins with a lex token of openKind
and ends with a lex token of closeKind. Advances the parser
to the next lex token after the closing token."""
expect(parser, open_kind)
nodes = []
while not skip(parser, close_kind):
nodes.append(parse_fn(parser))
return nodes | [
"def",
"any",
"(",
"parser",
",",
"open_kind",
",",
"parse_fn",
",",
"close_kind",
")",
":",
"# type: (Parser, int, Callable, int) -> Any",
"expect",
"(",
"parser",
",",
"open_kind",
")",
"nodes",
"=",
"[",
"]",
"while",
"not",
"skip",
"(",
"parser",
",",
"c... | Returns a possibly empty list of parse nodes, determined by
the parse_fn. This list begins with a lex token of openKind
and ends with a lex token of closeKind. Advances the parser
to the next lex token after the closing token. | [
"Returns",
"a",
"possibly",
"empty",
"list",
"of",
"parse",
"nodes",
"determined",
"by",
"the",
"parse_fn",
".",
"This",
"list",
"begins",
"with",
"a",
"lex",
"token",
"of",
"openKind",
"and",
"ends",
"with",
"a",
"lex",
"token",
"of",
"closeKind",
".",
... | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L199-L210 | train | 217,836 |
graphql-python/graphql-core | graphql/language/parser.py | parse_name | def parse_name(parser):
# type: (Parser) -> Name
"""Converts a name lex token into a name parse node."""
token = expect(parser, TokenKind.NAME)
return ast.Name(value=token.value, loc=loc(parser, token.start)) | python | def parse_name(parser):
# type: (Parser) -> Name
"""Converts a name lex token into a name parse node."""
token = expect(parser, TokenKind.NAME)
return ast.Name(value=token.value, loc=loc(parser, token.start)) | [
"def",
"parse_name",
"(",
"parser",
")",
":",
"# type: (Parser) -> Name",
"token",
"=",
"expect",
"(",
"parser",
",",
"TokenKind",
".",
"NAME",
")",
"return",
"ast",
".",
"Name",
"(",
"value",
"=",
"token",
".",
"value",
",",
"loc",
"=",
"loc",
"(",
"p... | Converts a name lex token into a name parse node. | [
"Converts",
"a",
"name",
"lex",
"token",
"into",
"a",
"name",
"parse",
"node",
"."
] | d8e9d3abe7c209eb2f51cf001402783bfd480596 | https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/language/parser.py#L227-L231 | train | 217,837 |
kieferk/dfply | dfply/summary_functions.py | mean | def mean(series):
"""
Returns the mean of a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.mean()
else:
return np.nan | python | def mean(series):
"""
Returns the mean of a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.mean()
else:
return np.nan | [
"def",
"mean",
"(",
"series",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"series",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"return",
"series",
".",
"mean",
"(",
")",
"else",
":",
"return",
"np",
".",
"nan"
] | Returns the mean of a series.
Args:
series (pandas.Series): column to summarize. | [
"Returns",
"the",
"mean",
"of",
"a",
"series",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/summary_functions.py#L11-L22 | train | 217,838 |
kieferk/dfply | dfply/summary_functions.py | first | def first(series, order_by=None):
"""
Returns the first value of a series.
Args:
series (pandas.Series): column to summarize.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
"""
if order_by is not None:
series = order_series_by(series, order_by)
first_s = series.iloc[0]
return first_s | python | def first(series, order_by=None):
"""
Returns the first value of a series.
Args:
series (pandas.Series): column to summarize.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
"""
if order_by is not None:
series = order_series_by(series, order_by)
first_s = series.iloc[0]
return first_s | [
"def",
"first",
"(",
"series",
",",
"order_by",
"=",
"None",
")",
":",
"if",
"order_by",
"is",
"not",
"None",
":",
"series",
"=",
"order_series_by",
"(",
"series",
",",
"order_by",
")",
"first_s",
"=",
"series",
".",
"iloc",
"[",
"0",
"]",
"return",
... | Returns the first value of a series.
Args:
series (pandas.Series): column to summarize.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization. | [
"Returns",
"the",
"first",
"value",
"of",
"a",
"series",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/summary_functions.py#L26-L41 | train | 217,839 |
kieferk/dfply | dfply/summary_functions.py | last | def last(series, order_by=None):
"""
Returns the last value of a series.
Args:
series (pandas.Series): column to summarize.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
"""
if order_by is not None:
series = order_series_by(series, order_by)
last_s = series.iloc[series.size - 1]
return last_s | python | def last(series, order_by=None):
"""
Returns the last value of a series.
Args:
series (pandas.Series): column to summarize.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
"""
if order_by is not None:
series = order_series_by(series, order_by)
last_s = series.iloc[series.size - 1]
return last_s | [
"def",
"last",
"(",
"series",
",",
"order_by",
"=",
"None",
")",
":",
"if",
"order_by",
"is",
"not",
"None",
":",
"series",
"=",
"order_series_by",
"(",
"series",
",",
"order_by",
")",
"last_s",
"=",
"series",
".",
"iloc",
"[",
"series",
".",
"size",
... | Returns the last value of a series.
Args:
series (pandas.Series): column to summarize.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization. | [
"Returns",
"the",
"last",
"value",
"of",
"a",
"series",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/summary_functions.py#L45-L60 | train | 217,840 |
kieferk/dfply | dfply/summary_functions.py | nth | def nth(series, n, order_by=None):
"""
Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
"""
if order_by is not None:
series = order_series_by(series, order_by)
try:
return series.iloc[n]
except:
return np.nan | python | def nth(series, n, order_by=None):
"""
Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
"""
if order_by is not None:
series = order_series_by(series, order_by)
try:
return series.iloc[n]
except:
return np.nan | [
"def",
"nth",
"(",
"series",
",",
"n",
",",
"order_by",
"=",
"None",
")",
":",
"if",
"order_by",
"is",
"not",
"None",
":",
"series",
"=",
"order_series_by",
"(",
"series",
",",
"order_by",
")",
"try",
":",
"return",
"series",
".",
"iloc",
"[",
"n",
... | Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization. | [
"Returns",
"the",
"nth",
"value",
"of",
"a",
"series",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/summary_functions.py#L64-L82 | train | 217,841 |
kieferk/dfply | dfply/summary_functions.py | median | def median(series):
"""
Returns the median value of a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.median()
else:
return np.nan | python | def median(series):
"""
Returns the median value of a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.median()
else:
return np.nan | [
"def",
"median",
"(",
"series",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"series",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"return",
"series",
".",
"median",
"(",
")",
"else",
":",
"return",
"np",
".",
"nan"
] | Returns the median value of a series.
Args:
series (pandas.Series): column to summarize. | [
"Returns",
"the",
"median",
"value",
"of",
"a",
"series",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/summary_functions.py#L153-L164 | train | 217,842 |
kieferk/dfply | dfply/summary_functions.py | var | def var(series):
"""
Returns the variance of values in a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.var()
else:
return np.nan | python | def var(series):
"""
Returns the variance of values in a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.var()
else:
return np.nan | [
"def",
"var",
"(",
"series",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"series",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"return",
"series",
".",
"var",
"(",
")",
"else",
":",
"return",
"np",
".",
"nan"
] | Returns the variance of values in a series.
Args:
series (pandas.Series): column to summarize. | [
"Returns",
"the",
"variance",
"of",
"values",
"in",
"a",
"series",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/summary_functions.py#L168-L178 | train | 217,843 |
kieferk/dfply | dfply/summary_functions.py | sd | def sd(series):
"""
Returns the standard deviation of values in a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.std()
else:
return np.nan | python | def sd(series):
"""
Returns the standard deviation of values in a series.
Args:
series (pandas.Series): column to summarize.
"""
if np.issubdtype(series.dtype, np.number):
return series.std()
else:
return np.nan | [
"def",
"sd",
"(",
"series",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"series",
".",
"dtype",
",",
"np",
".",
"number",
")",
":",
"return",
"series",
".",
"std",
"(",
")",
"else",
":",
"return",
"np",
".",
"nan"
] | Returns the standard deviation of values in a series.
Args:
series (pandas.Series): column to summarize. | [
"Returns",
"the",
"standard",
"deviation",
"of",
"values",
"in",
"a",
"series",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/summary_functions.py#L182-L193 | train | 217,844 |
kieferk/dfply | dfply/join.py | get_join_parameters | def get_join_parameters(join_kwargs):
"""
Convenience function to determine the columns to join the right and
left DataFrames on, as well as any suffixes for the columns.
"""
by = join_kwargs.get('by', None)
suffixes = join_kwargs.get('suffixes', ('_x', '_y'))
if isinstance(by, tuple):
left_on, right_on = by
elif isinstance(by, list):
by = [x if isinstance(x, tuple) else (x, x) for x in by]
left_on, right_on = (list(x) for x in zip(*by))
else:
left_on, right_on = by, by
return left_on, right_on, suffixes | python | def get_join_parameters(join_kwargs):
"""
Convenience function to determine the columns to join the right and
left DataFrames on, as well as any suffixes for the columns.
"""
by = join_kwargs.get('by', None)
suffixes = join_kwargs.get('suffixes', ('_x', '_y'))
if isinstance(by, tuple):
left_on, right_on = by
elif isinstance(by, list):
by = [x if isinstance(x, tuple) else (x, x) for x in by]
left_on, right_on = (list(x) for x in zip(*by))
else:
left_on, right_on = by, by
return left_on, right_on, suffixes | [
"def",
"get_join_parameters",
"(",
"join_kwargs",
")",
":",
"by",
"=",
"join_kwargs",
".",
"get",
"(",
"'by'",
",",
"None",
")",
"suffixes",
"=",
"join_kwargs",
".",
"get",
"(",
"'suffixes'",
",",
"(",
"'_x'",
",",
"'_y'",
")",
")",
"if",
"isinstance",
... | Convenience function to determine the columns to join the right and
left DataFrames on, as well as any suffixes for the columns. | [
"Convenience",
"function",
"to",
"determine",
"the",
"columns",
"to",
"join",
"the",
"right",
"and",
"left",
"DataFrames",
"on",
"as",
"well",
"as",
"any",
"suffixes",
"for",
"the",
"columns",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/join.py#L8-L23 | train | 217,845 |
kieferk/dfply | dfply/join.py | inner_join | def inner_join(df, other, **kwargs):
"""
Joins on values present in both DataFrames.
Args:
df (pandas.DataFrame): Left DataFrame (passed in via pipe)
other (pandas.DataFrame): Right DataFrame
Kwargs:
by (str or list): Columns to join on. If a single string, will join
on that column. If a list of lists which contain strings or
integers, the right/left columns to join on.
suffixes (list): String suffixes to append to column names in left
and right DataFrames.
Example:
a >> inner_join(b, by='x1')
x1 x2 x3
0 A 1 True
1 B 2 False
"""
left_on, right_on, suffixes = get_join_parameters(kwargs)
joined = df.merge(other, how='inner', left_on=left_on,
right_on=right_on, suffixes=suffixes)
return joined | python | def inner_join(df, other, **kwargs):
"""
Joins on values present in both DataFrames.
Args:
df (pandas.DataFrame): Left DataFrame (passed in via pipe)
other (pandas.DataFrame): Right DataFrame
Kwargs:
by (str or list): Columns to join on. If a single string, will join
on that column. If a list of lists which contain strings or
integers, the right/left columns to join on.
suffixes (list): String suffixes to append to column names in left
and right DataFrames.
Example:
a >> inner_join(b, by='x1')
x1 x2 x3
0 A 1 True
1 B 2 False
"""
left_on, right_on, suffixes = get_join_parameters(kwargs)
joined = df.merge(other, how='inner', left_on=left_on,
right_on=right_on, suffixes=suffixes)
return joined | [
"def",
"inner_join",
"(",
"df",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"left_on",
",",
"right_on",
",",
"suffixes",
"=",
"get_join_parameters",
"(",
"kwargs",
")",
"joined",
"=",
"df",
".",
"merge",
"(",
"other",
",",
"how",
"=",
"'inner'",
... | Joins on values present in both DataFrames.
Args:
df (pandas.DataFrame): Left DataFrame (passed in via pipe)
other (pandas.DataFrame): Right DataFrame
Kwargs:
by (str or list): Columns to join on. If a single string, will join
on that column. If a list of lists which contain strings or
integers, the right/left columns to join on.
suffixes (list): String suffixes to append to column names in left
and right DataFrames.
Example:
a >> inner_join(b, by='x1')
x1 x2 x3
0 A 1 True
1 B 2 False | [
"Joins",
"on",
"values",
"present",
"in",
"both",
"DataFrames",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/join.py#L27-L53 | train | 217,846 |
kieferk/dfply | dfply/join.py | anti_join | def anti_join(df, other, **kwargs):
"""
Returns all of the rows in the left DataFrame that do not have a
match in the right DataFrame.
Args:
df (pandas.DataFrame): Left DataFrame (passed in via pipe)
other (pandas.DataFrame): Right DataFrame
Kwargs:
by (str or list): Columns to join on. If a single string, will join
on that column. If a list of lists which contain strings or
integers, the right/left columns to join on.
Example:
a >> anti_join(b, by='x1')
x1 x2
2 C 3
"""
left_on, right_on, suffixes = get_join_parameters(kwargs)
if not right_on:
right_on = [col_name for col_name in df.columns.values.tolist() if col_name in other.columns.values.tolist()]
left_on = right_on
elif not isinstance(right_on, (list, tuple)):
right_on = [right_on]
other_reduced = other[right_on].drop_duplicates()
joined = df.merge(other_reduced, how='left', left_on=left_on,
right_on=right_on, suffixes=('', '_y'),
indicator=True).query('_merge=="left_only"')[df.columns.values.tolist()]
return joined | python | def anti_join(df, other, **kwargs):
"""
Returns all of the rows in the left DataFrame that do not have a
match in the right DataFrame.
Args:
df (pandas.DataFrame): Left DataFrame (passed in via pipe)
other (pandas.DataFrame): Right DataFrame
Kwargs:
by (str or list): Columns to join on. If a single string, will join
on that column. If a list of lists which contain strings or
integers, the right/left columns to join on.
Example:
a >> anti_join(b, by='x1')
x1 x2
2 C 3
"""
left_on, right_on, suffixes = get_join_parameters(kwargs)
if not right_on:
right_on = [col_name for col_name in df.columns.values.tolist() if col_name in other.columns.values.tolist()]
left_on = right_on
elif not isinstance(right_on, (list, tuple)):
right_on = [right_on]
other_reduced = other[right_on].drop_duplicates()
joined = df.merge(other_reduced, how='left', left_on=left_on,
right_on=right_on, suffixes=('', '_y'),
indicator=True).query('_merge=="left_only"')[df.columns.values.tolist()]
return joined | [
"def",
"anti_join",
"(",
"df",
",",
"other",
",",
"*",
"*",
"kwargs",
")",
":",
"left_on",
",",
"right_on",
",",
"suffixes",
"=",
"get_join_parameters",
"(",
"kwargs",
")",
"if",
"not",
"right_on",
":",
"right_on",
"=",
"[",
"col_name",
"for",
"col_name"... | Returns all of the rows in the left DataFrame that do not have a
match in the right DataFrame.
Args:
df (pandas.DataFrame): Left DataFrame (passed in via pipe)
other (pandas.DataFrame): Right DataFrame
Kwargs:
by (str or list): Columns to join on. If a single string, will join
on that column. If a list of lists which contain strings or
integers, the right/left columns to join on.
Example:
a >> anti_join(b, by='x1')
x1 x2
2 C 3 | [
"Returns",
"all",
"of",
"the",
"rows",
"in",
"the",
"left",
"DataFrame",
"that",
"do",
"not",
"have",
"a",
"match",
"in",
"the",
"right",
"DataFrame",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/join.py#L219-L250 | train | 217,847 |
kieferk/dfply | dfply/join.py | bind_rows | def bind_rows(df, other, join='outer', ignore_index=False):
"""
Binds DataFrames "vertically", stacking them together. This is equivalent
to `pd.concat` with `axis=0`.
Args:
df (pandas.DataFrame): Top DataFrame (passed in via pipe).
other (pandas.DataFrame): Bottom DataFrame.
Kwargs:
join (str): One of `"outer"` or `"inner"`. Outer join will preserve
columns not present in both DataFrames, whereas inner joining will
drop them.
ignore_index (bool): Indicates whether to consider pandas indices as
part of the concatenation (defaults to `False`).
"""
df = pd.concat([df, other], join=join, ignore_index=ignore_index, axis=0)
return df | python | def bind_rows(df, other, join='outer', ignore_index=False):
"""
Binds DataFrames "vertically", stacking them together. This is equivalent
to `pd.concat` with `axis=0`.
Args:
df (pandas.DataFrame): Top DataFrame (passed in via pipe).
other (pandas.DataFrame): Bottom DataFrame.
Kwargs:
join (str): One of `"outer"` or `"inner"`. Outer join will preserve
columns not present in both DataFrames, whereas inner joining will
drop them.
ignore_index (bool): Indicates whether to consider pandas indices as
part of the concatenation (defaults to `False`).
"""
df = pd.concat([df, other], join=join, ignore_index=ignore_index, axis=0)
return df | [
"def",
"bind_rows",
"(",
"df",
",",
"other",
",",
"join",
"=",
"'outer'",
",",
"ignore_index",
"=",
"False",
")",
":",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df",
",",
"other",
"]",
",",
"join",
"=",
"join",
",",
"ignore_index",
"=",
"ignore_in... | Binds DataFrames "vertically", stacking them together. This is equivalent
to `pd.concat` with `axis=0`.
Args:
df (pandas.DataFrame): Top DataFrame (passed in via pipe).
other (pandas.DataFrame): Bottom DataFrame.
Kwargs:
join (str): One of `"outer"` or `"inner"`. Outer join will preserve
columns not present in both DataFrames, whereas inner joining will
drop them.
ignore_index (bool): Indicates whether to consider pandas indices as
part of the concatenation (defaults to `False`). | [
"Binds",
"DataFrames",
"vertically",
"stacking",
"them",
"together",
".",
"This",
"is",
"equivalent",
"to",
"pd",
".",
"concat",
"with",
"axis",
"=",
"0",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/join.py#L258-L276 | train | 217,848 |
kieferk/dfply | dfply/reshape.py | arrange | def arrange(df, *args, **kwargs):
"""Calls `pandas.DataFrame.sort_values` to sort a DataFrame according to
criteria.
See:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sort_values.html
For a list of specific keyword arguments for sort_values (which will be
the same in arrange).
Args:
*args: Symbolic, string, integer or lists of those types indicating
columns to sort the DataFrame by.
Kwargs:
**kwargs: Any keyword arguments will be passed through to the pandas
`DataFrame.sort_values` function.
"""
flat_args = [a for a in flatten(args)]
series = [df[arg] if isinstance(arg, str) else
df.iloc[:, arg] if isinstance(arg, int) else
pd.Series(arg) for arg in flat_args]
sorter = pd.concat(series, axis=1).reset_index(drop=True)
sorter = sorter.sort_values(sorter.columns.tolist(), **kwargs)
return df.iloc[sorter.index, :] | python | def arrange(df, *args, **kwargs):
"""Calls `pandas.DataFrame.sort_values` to sort a DataFrame according to
criteria.
See:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sort_values.html
For a list of specific keyword arguments for sort_values (which will be
the same in arrange).
Args:
*args: Symbolic, string, integer or lists of those types indicating
columns to sort the DataFrame by.
Kwargs:
**kwargs: Any keyword arguments will be passed through to the pandas
`DataFrame.sort_values` function.
"""
flat_args = [a for a in flatten(args)]
series = [df[arg] if isinstance(arg, str) else
df.iloc[:, arg] if isinstance(arg, int) else
pd.Series(arg) for arg in flat_args]
sorter = pd.concat(series, axis=1).reset_index(drop=True)
sorter = sorter.sort_values(sorter.columns.tolist(), **kwargs)
return df.iloc[sorter.index, :] | [
"def",
"arrange",
"(",
"df",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"flat_args",
"=",
"[",
"a",
"for",
"a",
"in",
"flatten",
"(",
"args",
")",
"]",
"series",
"=",
"[",
"df",
"[",
"arg",
"]",
"if",
"isinstance",
"(",
"arg",
",",
... | Calls `pandas.DataFrame.sort_values` to sort a DataFrame according to
criteria.
See:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sort_values.html
For a list of specific keyword arguments for sort_values (which will be
the same in arrange).
Args:
*args: Symbolic, string, integer or lists of those types indicating
columns to sort the DataFrame by.
Kwargs:
**kwargs: Any keyword arguments will be passed through to the pandas
`DataFrame.sort_values` function. | [
"Calls",
"pandas",
".",
"DataFrame",
".",
"sort_values",
"to",
"sort",
"a",
"DataFrame",
"according",
"to",
"criteria",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L10-L37 | train | 217,849 |
kieferk/dfply | dfply/reshape.py | rename | def rename(df, **kwargs):
"""Renames columns, where keyword argument values are the current names
of columns and keys are the new names.
Args:
df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe.
Kwargs:
**kwargs: key:value pairs where keys are new names for columns and
values are current names of columns.
"""
return df.rename(columns={v: k for k, v in kwargs.items()}) | python | def rename(df, **kwargs):
"""Renames columns, where keyword argument values are the current names
of columns and keys are the new names.
Args:
df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe.
Kwargs:
**kwargs: key:value pairs where keys are new names for columns and
values are current names of columns.
"""
return df.rename(columns={v: k for k, v in kwargs.items()}) | [
"def",
"rename",
"(",
"df",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"df",
".",
"rename",
"(",
"columns",
"=",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
")"
] | Renames columns, where keyword argument values are the current names
of columns and keys are the new names.
Args:
df (:obj:`pandas.DataFrame`): DataFrame passed in via `>>` pipe.
Kwargs:
**kwargs: key:value pairs where keys are new names for columns and
values are current names of columns. | [
"Renames",
"columns",
"where",
"keyword",
"argument",
"values",
"are",
"the",
"current",
"names",
"of",
"columns",
"and",
"keys",
"are",
"the",
"new",
"names",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L46-L58 | train | 217,850 |
kieferk/dfply | dfply/reshape.py | convert_type | def convert_type(df, columns):
"""
Helper function that attempts to convert columns into their appropriate
data type.
"""
# taken in part from the dplython package
out_df = df.copy()
for col in columns:
column_values = pd.Series(out_df[col].unique())
column_values = column_values[~column_values.isnull()]
# empty
if len(column_values) == 0:
continue
# boolean
if set(column_values.values) < {'True', 'False'}:
out_df[col] = out_df[col].map({'True': True, 'False': False})
continue
# numeric
if pd.to_numeric(column_values, errors='coerce').isnull().sum() == 0:
out_df[col] = pd.to_numeric(out_df[col], errors='ignore')
continue
# datetime
if pd.to_datetime(column_values, errors='coerce').isnull().sum() == 0:
out_df[col] = pd.to_datetime(out_df[col], errors='ignore',
infer_datetime_format=True)
continue
return out_df | python | def convert_type(df, columns):
"""
Helper function that attempts to convert columns into their appropriate
data type.
"""
# taken in part from the dplython package
out_df = df.copy()
for col in columns:
column_values = pd.Series(out_df[col].unique())
column_values = column_values[~column_values.isnull()]
# empty
if len(column_values) == 0:
continue
# boolean
if set(column_values.values) < {'True', 'False'}:
out_df[col] = out_df[col].map({'True': True, 'False': False})
continue
# numeric
if pd.to_numeric(column_values, errors='coerce').isnull().sum() == 0:
out_df[col] = pd.to_numeric(out_df[col], errors='ignore')
continue
# datetime
if pd.to_datetime(column_values, errors='coerce').isnull().sum() == 0:
out_df[col] = pd.to_datetime(out_df[col], errors='ignore',
infer_datetime_format=True)
continue
return out_df | [
"def",
"convert_type",
"(",
"df",
",",
"columns",
")",
":",
"# taken in part from the dplython package",
"out_df",
"=",
"df",
".",
"copy",
"(",
")",
"for",
"col",
"in",
"columns",
":",
"column_values",
"=",
"pd",
".",
"Series",
"(",
"out_df",
"[",
"col",
"... | Helper function that attempts to convert columns into their appropriate
data type. | [
"Helper",
"function",
"that",
"attempts",
"to",
"convert",
"columns",
"into",
"their",
"appropriate",
"data",
"type",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L111-L138 | train | 217,851 |
kieferk/dfply | dfply/reshape.py | spread | def spread(df, key, values, convert=False):
"""
Transforms a "long" DataFrame into a "wide" format using a key and value
column.
If you have a mixed datatype column in your long-format DataFrame then the
default behavior is for the spread columns to be of type `object`, or
string. If you want to try to convert dtypes when spreading, you can set
the convert keyword argument in spread to True.
Args:
key (str, int, or symbolic): Label for the key column.
values (str, int, or symbolic): Label for the values column.
Kwargs:
convert (bool): Boolean indicating whether or not to try and convert
the spread columns to more appropriate data types.
Example:
widened = elongated >> spread(X.variable, X.value)
widened >> head(5)
_ID carat clarity color cut depth price table x y z
0 0 0.23 SI2 E Ideal 61.5 326 55 3.95 3.98 2.43
1 1 0.21 SI1 E Premium 59.8 326 61 3.89 3.84 2.31
2 10 0.3 SI1 J Good 64 339 55 4.25 4.28 2.73
3 100 0.75 SI1 D Very Good 63.2 2760 56 5.8 5.75 3.65
4 1000 0.75 SI1 D Ideal 62.3 2898 55 5.83 5.8 3.62
"""
# Taken mostly from dplython package
columns = df.columns.tolist()
id_cols = [col for col in columns if not col in [key, values]]
temp_index = ['' for i in range(len(df))]
for id_col in id_cols:
temp_index += df[id_col].map(str)
out_df = df.assign(temp_index=temp_index)
out_df = out_df.set_index('temp_index')
spread_data = out_df[[key, values]]
if not all(spread_data.groupby([spread_data.index, key]).agg(
'count').reset_index()[values] < 2):
raise ValueError('Duplicate identifiers')
spread_data = spread_data.pivot(columns=key, values=values)
if convert and (out_df[values].dtype.kind in 'OSaU'):
columns_to_convert = [col for col in spread_data if col not in columns]
spread_data = convert_type(spread_data, columns_to_convert)
out_df = out_df[id_cols].drop_duplicates()
out_df = out_df.merge(spread_data, left_index=True, right_index=True).reset_index(drop=True)
out_df = (out_df >> arrange(id_cols)).reset_index(drop=True)
return out_df | python | def spread(df, key, values, convert=False):
"""
Transforms a "long" DataFrame into a "wide" format using a key and value
column.
If you have a mixed datatype column in your long-format DataFrame then the
default behavior is for the spread columns to be of type `object`, or
string. If you want to try to convert dtypes when spreading, you can set
the convert keyword argument in spread to True.
Args:
key (str, int, or symbolic): Label for the key column.
values (str, int, or symbolic): Label for the values column.
Kwargs:
convert (bool): Boolean indicating whether or not to try and convert
the spread columns to more appropriate data types.
Example:
widened = elongated >> spread(X.variable, X.value)
widened >> head(5)
_ID carat clarity color cut depth price table x y z
0 0 0.23 SI2 E Ideal 61.5 326 55 3.95 3.98 2.43
1 1 0.21 SI1 E Premium 59.8 326 61 3.89 3.84 2.31
2 10 0.3 SI1 J Good 64 339 55 4.25 4.28 2.73
3 100 0.75 SI1 D Very Good 63.2 2760 56 5.8 5.75 3.65
4 1000 0.75 SI1 D Ideal 62.3 2898 55 5.83 5.8 3.62
"""
# Taken mostly from dplython package
columns = df.columns.tolist()
id_cols = [col for col in columns if not col in [key, values]]
temp_index = ['' for i in range(len(df))]
for id_col in id_cols:
temp_index += df[id_col].map(str)
out_df = df.assign(temp_index=temp_index)
out_df = out_df.set_index('temp_index')
spread_data = out_df[[key, values]]
if not all(spread_data.groupby([spread_data.index, key]).agg(
'count').reset_index()[values] < 2):
raise ValueError('Duplicate identifiers')
spread_data = spread_data.pivot(columns=key, values=values)
if convert and (out_df[values].dtype.kind in 'OSaU'):
columns_to_convert = [col for col in spread_data if col not in columns]
spread_data = convert_type(spread_data, columns_to_convert)
out_df = out_df[id_cols].drop_duplicates()
out_df = out_df.merge(spread_data, left_index=True, right_index=True).reset_index(drop=True)
out_df = (out_df >> arrange(id_cols)).reset_index(drop=True)
return out_df | [
"def",
"spread",
"(",
"df",
",",
"key",
",",
"values",
",",
"convert",
"=",
"False",
")",
":",
"# Taken mostly from dplython package",
"columns",
"=",
"df",
".",
"columns",
".",
"tolist",
"(",
")",
"id_cols",
"=",
"[",
"col",
"for",
"col",
"in",
"columns... | Transforms a "long" DataFrame into a "wide" format using a key and value
column.
If you have a mixed datatype column in your long-format DataFrame then the
default behavior is for the spread columns to be of type `object`, or
string. If you want to try to convert dtypes when spreading, you can set
the convert keyword argument in spread to True.
Args:
key (str, int, or symbolic): Label for the key column.
values (str, int, or symbolic): Label for the values column.
Kwargs:
convert (bool): Boolean indicating whether or not to try and convert
the spread columns to more appropriate data types.
Example:
widened = elongated >> spread(X.variable, X.value)
widened >> head(5)
_ID carat clarity color cut depth price table x y z
0 0 0.23 SI2 E Ideal 61.5 326 55 3.95 3.98 2.43
1 1 0.21 SI1 E Premium 59.8 326 61 3.89 3.84 2.31
2 10 0.3 SI1 J Good 64 339 55 4.25 4.28 2.73
3 100 0.75 SI1 D Very Good 63.2 2760 56 5.8 5.75 3.65
4 1000 0.75 SI1 D Ideal 62.3 2898 55 5.83 5.8 3.62 | [
"Transforms",
"a",
"long",
"DataFrame",
"into",
"a",
"wide",
"format",
"using",
"a",
"key",
"and",
"value",
"column",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L143-L201 | train | 217,852 |
kieferk/dfply | dfply/reshape.py | separate | def separate(df, column, into, sep="[\W_]+", remove=True, convert=False,
extra='drop', fill='right'):
"""
Splits columns into multiple columns.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
column (str, symbolic): Label of column to split.
into (list): List of string names for new columns.
Kwargs:
sep (str or list): If a string, the regex string used to split the
column. If a list, a list of integer positions to split strings
on.
remove (bool): Boolean indicating whether to remove the original column.
convert (bool): Boolean indicating whether the new columns should be
converted to the appropriate type.
extra (str): either `'drop'`, where split pieces beyond the specified
new columns are dropped, or `'merge'`, where the final split piece
contains the remainder of the original column.
fill (str): either `'right'`, where `np.nan` values are filled in the
right-most columns for missing pieces, or `'left'` where `np.nan`
values are filled in the left-most columns.
"""
assert isinstance(into, (tuple, list))
if isinstance(sep, (tuple, list)):
inds = [0] + list(sep)
if len(inds) > len(into):
if extra == 'drop':
inds = inds[:len(into) + 1]
elif extra == 'merge':
inds = inds[:len(into)] + [None]
else:
inds = inds + [None]
splits = df[column].map(lambda x: [str(x)[slice(inds[i], inds[i + 1])]
if i < len(inds) - 1 else np.nan
for i in range(len(into))])
else:
maxsplit = len(into) - 1 if extra == 'merge' else 0
splits = df[column].map(lambda x: re.split(sep, x, maxsplit))
right_filler = lambda x: x + [np.nan for i in range(len(into) - len(x))]
left_filler = lambda x: [np.nan for i in range(len(into) - len(x))] + x
if fill == 'right':
splits = [right_filler(x) for x in splits]
elif fill == 'left':
splits = [left_filler(x) for x in splits]
for i, split_col in enumerate(into):
df[split_col] = [x[i] if not x[i] == '' else np.nan for x in splits]
if convert:
df = convert_type(df, into)
if remove:
df.drop(column, axis=1, inplace=True)
return df | python | def separate(df, column, into, sep="[\W_]+", remove=True, convert=False,
extra='drop', fill='right'):
"""
Splits columns into multiple columns.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
column (str, symbolic): Label of column to split.
into (list): List of string names for new columns.
Kwargs:
sep (str or list): If a string, the regex string used to split the
column. If a list, a list of integer positions to split strings
on.
remove (bool): Boolean indicating whether to remove the original column.
convert (bool): Boolean indicating whether the new columns should be
converted to the appropriate type.
extra (str): either `'drop'`, where split pieces beyond the specified
new columns are dropped, or `'merge'`, where the final split piece
contains the remainder of the original column.
fill (str): either `'right'`, where `np.nan` values are filled in the
right-most columns for missing pieces, or `'left'` where `np.nan`
values are filled in the left-most columns.
"""
assert isinstance(into, (tuple, list))
if isinstance(sep, (tuple, list)):
inds = [0] + list(sep)
if len(inds) > len(into):
if extra == 'drop':
inds = inds[:len(into) + 1]
elif extra == 'merge':
inds = inds[:len(into)] + [None]
else:
inds = inds + [None]
splits = df[column].map(lambda x: [str(x)[slice(inds[i], inds[i + 1])]
if i < len(inds) - 1 else np.nan
for i in range(len(into))])
else:
maxsplit = len(into) - 1 if extra == 'merge' else 0
splits = df[column].map(lambda x: re.split(sep, x, maxsplit))
right_filler = lambda x: x + [np.nan for i in range(len(into) - len(x))]
left_filler = lambda x: [np.nan for i in range(len(into) - len(x))] + x
if fill == 'right':
splits = [right_filler(x) for x in splits]
elif fill == 'left':
splits = [left_filler(x) for x in splits]
for i, split_col in enumerate(into):
df[split_col] = [x[i] if not x[i] == '' else np.nan for x in splits]
if convert:
df = convert_type(df, into)
if remove:
df.drop(column, axis=1, inplace=True)
return df | [
"def",
"separate",
"(",
"df",
",",
"column",
",",
"into",
",",
"sep",
"=",
"\"[\\W_]+\"",
",",
"remove",
"=",
"True",
",",
"convert",
"=",
"False",
",",
"extra",
"=",
"'drop'",
",",
"fill",
"=",
"'right'",
")",
":",
"assert",
"isinstance",
"(",
"into... | Splits columns into multiple columns.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
column (str, symbolic): Label of column to split.
into (list): List of string names for new columns.
Kwargs:
sep (str or list): If a string, the regex string used to split the
column. If a list, a list of integer positions to split strings
on.
remove (bool): Boolean indicating whether to remove the original column.
convert (bool): Boolean indicating whether the new columns should be
converted to the appropriate type.
extra (str): either `'drop'`, where split pieces beyond the specified
new columns are dropped, or `'merge'`, where the final split piece
contains the remainder of the original column.
fill (str): either `'right'`, where `np.nan` values are filled in the
right-most columns for missing pieces, or `'left'` where `np.nan`
values are filled in the left-most columns. | [
"Splits",
"columns",
"into",
"multiple",
"columns",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L210-L272 | train | 217,853 |
kieferk/dfply | dfply/reshape.py | unite | def unite(df, colname, *args, **kwargs):
"""
Does the inverse of `separate`, joining columns together by a specified
separator.
Any columns that are not strings will be converted to strings.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
colname (str): the name of the new joined column.
*args: list of columns to be joined, which can be strings, symbolic, or
integer positions.
Kwargs:
sep (str): the string separator to join the columns with.
remove (bool): Boolean indicating whether or not to remove the
original columns.
na_action (str): can be one of `'maintain'` (the default),
'`ignore'`, or `'as_string'`. The default will make the new column
row a `NaN` value if any of the original column cells at that
row contained `NaN`. '`ignore'` will treat any `NaN` value as an
empty string during joining. `'as_string'` will convert any `NaN`
value to the string `'nan'` prior to joining.
"""
to_unite = list([a for a in flatten(args)])
sep = kwargs.get('sep', '_')
remove = kwargs.get('remove', True)
# possible na_action values
# ignore: empty string
# maintain: keep as np.nan (default)
# as_string: becomes string 'nan'
na_action = kwargs.get('na_action', 'maintain')
# print(to_unite, sep, remove, na_action)
if na_action == 'maintain':
df[colname] = df[to_unite].apply(lambda x: np.nan if any(x.isnull())
else sep.join(x.map(str)), axis=1)
elif na_action == 'ignore':
df[colname] = df[to_unite].apply(lambda x: sep.join(x[~x.isnull()].map(str)),
axis=1)
elif na_action == 'as_string':
df[colname] = df[to_unite].astype(str).apply(lambda x: sep.join(x), axis=1)
if remove:
df.drop(to_unite, axis=1, inplace=True)
return df | python | def unite(df, colname, *args, **kwargs):
"""
Does the inverse of `separate`, joining columns together by a specified
separator.
Any columns that are not strings will be converted to strings.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
colname (str): the name of the new joined column.
*args: list of columns to be joined, which can be strings, symbolic, or
integer positions.
Kwargs:
sep (str): the string separator to join the columns with.
remove (bool): Boolean indicating whether or not to remove the
original columns.
na_action (str): can be one of `'maintain'` (the default),
'`ignore'`, or `'as_string'`. The default will make the new column
row a `NaN` value if any of the original column cells at that
row contained `NaN`. '`ignore'` will treat any `NaN` value as an
empty string during joining. `'as_string'` will convert any `NaN`
value to the string `'nan'` prior to joining.
"""
to_unite = list([a for a in flatten(args)])
sep = kwargs.get('sep', '_')
remove = kwargs.get('remove', True)
# possible na_action values
# ignore: empty string
# maintain: keep as np.nan (default)
# as_string: becomes string 'nan'
na_action = kwargs.get('na_action', 'maintain')
# print(to_unite, sep, remove, na_action)
if na_action == 'maintain':
df[colname] = df[to_unite].apply(lambda x: np.nan if any(x.isnull())
else sep.join(x.map(str)), axis=1)
elif na_action == 'ignore':
df[colname] = df[to_unite].apply(lambda x: sep.join(x[~x.isnull()].map(str)),
axis=1)
elif na_action == 'as_string':
df[colname] = df[to_unite].astype(str).apply(lambda x: sep.join(x), axis=1)
if remove:
df.drop(to_unite, axis=1, inplace=True)
return df | [
"def",
"unite",
"(",
"df",
",",
"colname",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"to_unite",
"=",
"list",
"(",
"[",
"a",
"for",
"a",
"in",
"flatten",
"(",
"args",
")",
"]",
")",
"sep",
"=",
"kwargs",
".",
"get",
"(",
"'sep'",
"... | Does the inverse of `separate`, joining columns together by a specified
separator.
Any columns that are not strings will be converted to strings.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
colname (str): the name of the new joined column.
*args: list of columns to be joined, which can be strings, symbolic, or
integer positions.
Kwargs:
sep (str): the string separator to join the columns with.
remove (bool): Boolean indicating whether or not to remove the
original columns.
na_action (str): can be one of `'maintain'` (the default),
'`ignore'`, or `'as_string'`. The default will make the new column
row a `NaN` value if any of the original column cells at that
row contained `NaN`. '`ignore'` will treat any `NaN` value as an
empty string during joining. `'as_string'` will convert any `NaN`
value to the string `'nan'` prior to joining. | [
"Does",
"the",
"inverse",
"of",
"separate",
"joining",
"columns",
"together",
"by",
"a",
"specified",
"separator",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/reshape.py#L281-L329 | train | 217,854 |
kieferk/dfply | dfply/set_ops.py | validate_set_ops | def validate_set_ops(df, other):
"""
Helper function to ensure that DataFrames are valid for set operations.
Columns must be the same name in the same order, and indices must be of the
same dimension with the same names.
"""
if df.columns.values.tolist() != other.columns.values.tolist():
not_in_df = [col for col in other.columns if col not in df.columns]
not_in_other = [col for col in df.columns if col not in other.columns]
error_string = 'Error: not compatible.'
if len(not_in_df):
error_string += ' Cols in y but not x: ' + str(not_in_df) + '.'
if len(not_in_other):
error_string += ' Cols in x but not y: ' + str(not_in_other) + '.'
raise ValueError(error_string)
if len(df.index.names) != len(other.index.names):
raise ValueError('Index dimension mismatch')
if df.index.names != other.index.names:
raise ValueError('Index mismatch')
else:
return | python | def validate_set_ops(df, other):
"""
Helper function to ensure that DataFrames are valid for set operations.
Columns must be the same name in the same order, and indices must be of the
same dimension with the same names.
"""
if df.columns.values.tolist() != other.columns.values.tolist():
not_in_df = [col for col in other.columns if col not in df.columns]
not_in_other = [col for col in df.columns if col not in other.columns]
error_string = 'Error: not compatible.'
if len(not_in_df):
error_string += ' Cols in y but not x: ' + str(not_in_df) + '.'
if len(not_in_other):
error_string += ' Cols in x but not y: ' + str(not_in_other) + '.'
raise ValueError(error_string)
if len(df.index.names) != len(other.index.names):
raise ValueError('Index dimension mismatch')
if df.index.names != other.index.names:
raise ValueError('Index mismatch')
else:
return | [
"def",
"validate_set_ops",
"(",
"df",
",",
"other",
")",
":",
"if",
"df",
".",
"columns",
".",
"values",
".",
"tolist",
"(",
")",
"!=",
"other",
".",
"columns",
".",
"values",
".",
"tolist",
"(",
")",
":",
"not_in_df",
"=",
"[",
"col",
"for",
"col"... | Helper function to ensure that DataFrames are valid for set operations.
Columns must be the same name in the same order, and indices must be of the
same dimension with the same names. | [
"Helper",
"function",
"to",
"ensure",
"that",
"DataFrames",
"are",
"valid",
"for",
"set",
"operations",
".",
"Columns",
"must",
"be",
"the",
"same",
"name",
"in",
"the",
"same",
"order",
"and",
"indices",
"must",
"be",
"of",
"the",
"same",
"dimension",
"wi... | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/set_ops.py#L6-L27 | train | 217,855 |
kieferk/dfply | dfply/set_ops.py | union | def union(df, other, index=False, keep='first'):
"""
Returns rows that appear in either DataFrame.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
stacked = df.append(other)
if index:
stacked_reset_indexes = stacked.reset_index()
index_cols = [col for col in stacked_reset_indexes.columns if col not in df.columns]
index_name = df.index.names
return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)
return_df.index.names = index_name
return return_df
else:
return stacked.drop_duplicates(keep=keep) | python | def union(df, other, index=False, keep='first'):
"""
Returns rows that appear in either DataFrame.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
stacked = df.append(other)
if index:
stacked_reset_indexes = stacked.reset_index()
index_cols = [col for col in stacked_reset_indexes.columns if col not in df.columns]
index_name = df.index.names
return_df = stacked_reset_indexes.drop_duplicates(keep=keep).set_index(index_cols)
return_df.index.names = index_name
return return_df
else:
return stacked.drop_duplicates(keep=keep) | [
"def",
"union",
"(",
"df",
",",
"other",
",",
"index",
"=",
"False",
",",
"keep",
"=",
"'first'",
")",
":",
"validate_set_ops",
"(",
"df",
",",
"other",
")",
"stacked",
"=",
"df",
".",
"append",
"(",
"other",
")",
"if",
"index",
":",
"stacked_reset_i... | Returns rows that appear in either DataFrame.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`. | [
"Returns",
"rows",
"that",
"appear",
"in",
"either",
"DataFrame",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/set_ops.py#L35-L60 | train | 217,856 |
kieferk/dfply | dfply/set_ops.py | intersect | def intersect(df, other, index=False, keep='first'):
"""
Returns rows that appear in both DataFrames.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
if index:
df_reset_index = df.reset_index()
other_reset_index = other.reset_index()
index_cols = [col for col in df_reset_index.columns if col not in df.columns]
df_index_names = df.index.names
return_df = (pd.merge(df_reset_index, other_reset_index,
how='inner',
left_on=df_reset_index.columns.values.tolist(),
right_on=df_reset_index.columns.values.tolist())
.set_index(index_cols))
return_df.index.names = df_index_names
return_df = return_df.drop_duplicates(keep=keep)
return return_df
else:
return_df = pd.merge(df, other,
how='inner',
left_on=df.columns.values.tolist(),
right_on=df.columns.values.tolist())
return_df = return_df.drop_duplicates(keep=keep)
return return_df | python | def intersect(df, other, index=False, keep='first'):
"""
Returns rows that appear in both DataFrames.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`.
"""
validate_set_ops(df, other)
if index:
df_reset_index = df.reset_index()
other_reset_index = other.reset_index()
index_cols = [col for col in df_reset_index.columns if col not in df.columns]
df_index_names = df.index.names
return_df = (pd.merge(df_reset_index, other_reset_index,
how='inner',
left_on=df_reset_index.columns.values.tolist(),
right_on=df_reset_index.columns.values.tolist())
.set_index(index_cols))
return_df.index.names = df_index_names
return_df = return_df.drop_duplicates(keep=keep)
return return_df
else:
return_df = pd.merge(df, other,
how='inner',
left_on=df.columns.values.tolist(),
right_on=df.columns.values.tolist())
return_df = return_df.drop_duplicates(keep=keep)
return return_df | [
"def",
"intersect",
"(",
"df",
",",
"other",
",",
"index",
"=",
"False",
",",
"keep",
"=",
"'first'",
")",
":",
"validate_set_ops",
"(",
"df",
",",
"other",
")",
"if",
"index",
":",
"df_reset_index",
"=",
"df",
".",
"reset_index",
"(",
")",
"other_rese... | Returns rows that appear in both DataFrames.
Args:
df (pandas.DataFrame): data passed in through the pipe.
other (pandas.DataFrame): other DataFrame to use for set operation with
the first.
Kwargs:
index (bool): Boolean indicating whether to consider the pandas index
as part of the set operation (default `False`).
keep (str): Indicates which duplicate should be kept. Options are `'first'`
and `'last'`. | [
"Returns",
"rows",
"that",
"appear",
"in",
"both",
"DataFrames",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/set_ops.py#L69-L105 | train | 217,857 |
kieferk/dfply | dfply/transform.py | transmute | def transmute(df, *keep_columns, **kwargs):
"""
Creates columns and then returns those new columns and optionally specified
original columns from the DataFrame.
This works like `mutate`, but designed to discard the original columns used
to create the new ones.
Args:
*keep_columns: Column labels to keep. Can be string, symbolic, or
integer position.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
y_div_z x_plus_y
0 1.637860 7.93
1 1.662338 7.73
2 1.761905 8.12
"""
keep_cols = []
for col in flatten(keep_columns):
try:
keep_cols.append(col.name)
except:
if isinstance(col, str):
keep_cols.append(col)
elif isinstance(col, int):
keep_cols.append(df.columns[col])
df = df.assign(**kwargs)
columns = [k for k in kwargs.keys()] + list(keep_cols)
return df[columns] | python | def transmute(df, *keep_columns, **kwargs):
"""
Creates columns and then returns those new columns and optionally specified
original columns from the DataFrame.
This works like `mutate`, but designed to discard the original columns used
to create the new ones.
Args:
*keep_columns: Column labels to keep. Can be string, symbolic, or
integer position.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
y_div_z x_plus_y
0 1.637860 7.93
1 1.662338 7.73
2 1.761905 8.12
"""
keep_cols = []
for col in flatten(keep_columns):
try:
keep_cols.append(col.name)
except:
if isinstance(col, str):
keep_cols.append(col)
elif isinstance(col, int):
keep_cols.append(df.columns[col])
df = df.assign(**kwargs)
columns = [k for k in kwargs.keys()] + list(keep_cols)
return df[columns] | [
"def",
"transmute",
"(",
"df",
",",
"*",
"keep_columns",
",",
"*",
"*",
"kwargs",
")",
":",
"keep_cols",
"=",
"[",
"]",
"for",
"col",
"in",
"flatten",
"(",
"keep_columns",
")",
":",
"try",
":",
"keep_cols",
".",
"append",
"(",
"col",
".",
"name",
"... | Creates columns and then returns those new columns and optionally specified
original columns from the DataFrame.
This works like `mutate`, but designed to discard the original columns used
to create the new ones.
Args:
*keep_columns: Column labels to keep. Can be string, symbolic, or
integer position.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
y_div_z x_plus_y
0 1.637860 7.93
1 1.662338 7.73
2 1.761905 8.12 | [
"Creates",
"columns",
"and",
"then",
"returns",
"those",
"new",
"columns",
"and",
"optionally",
"specified",
"original",
"columns",
"from",
"the",
"DataFrame",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/transform.py#L64-L101 | train | 217,858 |
kieferk/dfply | dfply/vector.py | coalesce | def coalesce(*series):
"""
Takes the first non-NaN value in order across the specified series,
returning a new series. Mimics the coalesce function in dplyr and SQL.
Args:
*series: Series objects, typically represented in their symbolic form
(like X.series).
Example:
df = pd.DataFrame({
'a':[1,np.nan,np.nan,np.nan,np.nan],
'b':[2,3,np.nan,np.nan,np.nan],
'c':[np.nan,np.nan,4,5,np.nan],
'd':[6,7,8,9,np.nan]
})
df >> transmute(coal=coalesce(X.a, X.b, X.c, X.d))
coal
0 1
1 3
2 4
3 5
4 np.nan
"""
series = [pd.Series(s) for s in series]
coalescer = pd.concat(series, axis=1)
min_nonna = np.argmin(pd.isnull(coalescer).values, axis=1)
min_nonna = [coalescer.columns[i] for i in min_nonna]
return coalescer.lookup(np.arange(coalescer.shape[0]), min_nonna) | python | def coalesce(*series):
"""
Takes the first non-NaN value in order across the specified series,
returning a new series. Mimics the coalesce function in dplyr and SQL.
Args:
*series: Series objects, typically represented in their symbolic form
(like X.series).
Example:
df = pd.DataFrame({
'a':[1,np.nan,np.nan,np.nan,np.nan],
'b':[2,3,np.nan,np.nan,np.nan],
'c':[np.nan,np.nan,4,5,np.nan],
'd':[6,7,8,9,np.nan]
})
df >> transmute(coal=coalesce(X.a, X.b, X.c, X.d))
coal
0 1
1 3
2 4
3 5
4 np.nan
"""
series = [pd.Series(s) for s in series]
coalescer = pd.concat(series, axis=1)
min_nonna = np.argmin(pd.isnull(coalescer).values, axis=1)
min_nonna = [coalescer.columns[i] for i in min_nonna]
return coalescer.lookup(np.arange(coalescer.shape[0]), min_nonna) | [
"def",
"coalesce",
"(",
"*",
"series",
")",
":",
"series",
"=",
"[",
"pd",
".",
"Series",
"(",
"s",
")",
"for",
"s",
"in",
"series",
"]",
"coalescer",
"=",
"pd",
".",
"concat",
"(",
"series",
",",
"axis",
"=",
"1",
")",
"min_nonna",
"=",
"np",
... | Takes the first non-NaN value in order across the specified series,
returning a new series. Mimics the coalesce function in dplyr and SQL.
Args:
*series: Series objects, typically represented in their symbolic form
(like X.series).
Example:
df = pd.DataFrame({
'a':[1,np.nan,np.nan,np.nan,np.nan],
'b':[2,3,np.nan,np.nan,np.nan],
'c':[np.nan,np.nan,4,5,np.nan],
'd':[6,7,8,9,np.nan]
})
df >> transmute(coal=coalesce(X.a, X.b, X.c, X.d))
coal
0 1
1 3
2 4
3 5
4 np.nan | [
"Takes",
"the",
"first",
"non",
"-",
"NaN",
"value",
"in",
"order",
"across",
"the",
"specified",
"series",
"returning",
"a",
"new",
"series",
".",
"Mimics",
"the",
"coalesce",
"function",
"in",
"dplyr",
"and",
"SQL",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/vector.py#L80-L110 | train | 217,859 |
kieferk/dfply | dfply/vector.py | case_when | def case_when(*conditions):
"""
Functions as a switch statement, creating a new series out of logical
conditions specified by 2-item lists where the left-hand item is the
logical condition and the right-hand item is the value where that
condition is true.
Conditions should go from the most specific to the most general. A
conditional that appears earlier in the series will "overwrite" one that
appears later. Think of it like a series of if-else statements.
The logicals and values of the condition pairs must be all the same
length, or length 1. Logicals can be vectors of booleans or a single
boolean (`True`, for example, can be the logical statement for the
final conditional to catch all remaining.).
Args:
*conditions: Each condition should be a list with two values. The first
value is a boolean or vector of booleans that specify indices in
which the condition is met. The second value is a vector of values
or single value specifying the outcome where that condition is met.
Example:
df = pd.DataFrame({
'num':np.arange(16)
})
df >> mutate(strnum=case_when([X.num % 15 == 0, 'fizzbuzz'],
[X.num % 3 == 0, 'fizz'],
[X.num % 5 == 0, 'buzz'],
[True, X.num.astype(str)]))
num strnum
0 0 fizzbuzz
1 1 1
2 2 2
3 3 fizz
4 4 4
5 5 buzz
6 6 fizz
7 7 7
8 8 8
9 9 fizz
10 10 buzz
11 11 11
12 12 fizz
13 13 13
14 14 14
15 15 fizzbuzz
"""
lengths = []
for logical, outcome in conditions:
if isinstance(logical, collections.Iterable):
lengths.append(len(logical))
if isinstance(outcome, collections.Iterable) and not isinstance(outcome, str):
lengths.append(len(outcome))
unique_lengths = np.unique(lengths)
assert len(unique_lengths) == 1
output_len = unique_lengths[0]
output = []
for logical, outcome in conditions:
if isinstance(logical, bool):
logical = np.repeat(logical, output_len)
if isinstance(logical, pd.Series):
logical = logical.values
if not isinstance(outcome, collections.Iterable) or isinstance(outcome, str):
outcome = pd.Series(np.repeat(outcome, output_len))
outcome[~logical] = np.nan
output.append(outcome)
return coalesce(*output) | python | def case_when(*conditions):
"""
Functions as a switch statement, creating a new series out of logical
conditions specified by 2-item lists where the left-hand item is the
logical condition and the right-hand item is the value where that
condition is true.
Conditions should go from the most specific to the most general. A
conditional that appears earlier in the series will "overwrite" one that
appears later. Think of it like a series of if-else statements.
The logicals and values of the condition pairs must be all the same
length, or length 1. Logicals can be vectors of booleans or a single
boolean (`True`, for example, can be the logical statement for the
final conditional to catch all remaining.).
Args:
*conditions: Each condition should be a list with two values. The first
value is a boolean or vector of booleans that specify indices in
which the condition is met. The second value is a vector of values
or single value specifying the outcome where that condition is met.
Example:
df = pd.DataFrame({
'num':np.arange(16)
})
df >> mutate(strnum=case_when([X.num % 15 == 0, 'fizzbuzz'],
[X.num % 3 == 0, 'fizz'],
[X.num % 5 == 0, 'buzz'],
[True, X.num.astype(str)]))
num strnum
0 0 fizzbuzz
1 1 1
2 2 2
3 3 fizz
4 4 4
5 5 buzz
6 6 fizz
7 7 7
8 8 8
9 9 fizz
10 10 buzz
11 11 11
12 12 fizz
13 13 13
14 14 14
15 15 fizzbuzz
"""
lengths = []
for logical, outcome in conditions:
if isinstance(logical, collections.Iterable):
lengths.append(len(logical))
if isinstance(outcome, collections.Iterable) and not isinstance(outcome, str):
lengths.append(len(outcome))
unique_lengths = np.unique(lengths)
assert len(unique_lengths) == 1
output_len = unique_lengths[0]
output = []
for logical, outcome in conditions:
if isinstance(logical, bool):
logical = np.repeat(logical, output_len)
if isinstance(logical, pd.Series):
logical = logical.values
if not isinstance(outcome, collections.Iterable) or isinstance(outcome, str):
outcome = pd.Series(np.repeat(outcome, output_len))
outcome[~logical] = np.nan
output.append(outcome)
return coalesce(*output) | [
"def",
"case_when",
"(",
"*",
"conditions",
")",
":",
"lengths",
"=",
"[",
"]",
"for",
"logical",
",",
"outcome",
"in",
"conditions",
":",
"if",
"isinstance",
"(",
"logical",
",",
"collections",
".",
"Iterable",
")",
":",
"lengths",
".",
"append",
"(",
... | Functions as a switch statement, creating a new series out of logical
conditions specified by 2-item lists where the left-hand item is the
logical condition and the right-hand item is the value where that
condition is true.
Conditions should go from the most specific to the most general. A
conditional that appears earlier in the series will "overwrite" one that
appears later. Think of it like a series of if-else statements.
The logicals and values of the condition pairs must be all the same
length, or length 1. Logicals can be vectors of booleans or a single
boolean (`True`, for example, can be the logical statement for the
final conditional to catch all remaining.).
Args:
*conditions: Each condition should be a list with two values. The first
value is a boolean or vector of booleans that specify indices in
which the condition is met. The second value is a vector of values
or single value specifying the outcome where that condition is met.
Example:
df = pd.DataFrame({
'num':np.arange(16)
})
df >> mutate(strnum=case_when([X.num % 15 == 0, 'fizzbuzz'],
[X.num % 3 == 0, 'fizz'],
[X.num % 5 == 0, 'buzz'],
[True, X.num.astype(str)]))
num strnum
0 0 fizzbuzz
1 1 1
2 2 2
3 3 fizz
4 4 4
5 5 buzz
6 6 fizz
7 7 7
8 8 8
9 9 fizz
10 10 buzz
11 11 11
12 12 fizz
13 13 13
14 14 14
15 15 fizzbuzz | [
"Functions",
"as",
"a",
"switch",
"statement",
"creating",
"a",
"new",
"series",
"out",
"of",
"logical",
"conditions",
"specified",
"by",
"2",
"-",
"item",
"lists",
"where",
"the",
"left",
"-",
"hand",
"item",
"is",
"the",
"logical",
"condition",
"and",
"t... | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/vector.py#L118-L189 | train | 217,860 |
kieferk/dfply | dfply/vector.py | if_else | def if_else(condition, when_true, otherwise):
"""
Wraps creation of a series based on if-else conditional logic into a function
call.
Provide a boolean vector condition, value(s) when true, and value(s)
when false, and a vector will be returned the same length as the conditional
vector according to the logical statement.
Args:
condition: A boolean vector representing the condition. This is often
a logical statement with a symbolic series.
when_true: A vector the same length as the condition vector or a single
value to apply when the condition is `True`.
otherwise: A vector the same length as the condition vector or a single
value to apply when the condition is `False`.
Example:
df = pd.DataFrame
"""
if not isinstance(when_true, collections.Iterable) or isinstance(when_true, str):
when_true = np.repeat(when_true, len(condition))
if not isinstance(otherwise, collections.Iterable) or isinstance(otherwise, str):
otherwise = np.repeat(otherwise, len(condition))
assert (len(condition) == len(when_true)) and (len(condition) == len(otherwise))
if isinstance(when_true, pd.Series):
when_true = when_true.values
if isinstance(otherwise, pd.Series):
otherwise = otherwise.values
output = np.array([when_true[i] if c else otherwise[i]
for i, c in enumerate(condition)])
return output | python | def if_else(condition, when_true, otherwise):
"""
Wraps creation of a series based on if-else conditional logic into a function
call.
Provide a boolean vector condition, value(s) when true, and value(s)
when false, and a vector will be returned the same length as the conditional
vector according to the logical statement.
Args:
condition: A boolean vector representing the condition. This is often
a logical statement with a symbolic series.
when_true: A vector the same length as the condition vector or a single
value to apply when the condition is `True`.
otherwise: A vector the same length as the condition vector or a single
value to apply when the condition is `False`.
Example:
df = pd.DataFrame
"""
if not isinstance(when_true, collections.Iterable) or isinstance(when_true, str):
when_true = np.repeat(when_true, len(condition))
if not isinstance(otherwise, collections.Iterable) or isinstance(otherwise, str):
otherwise = np.repeat(otherwise, len(condition))
assert (len(condition) == len(when_true)) and (len(condition) == len(otherwise))
if isinstance(when_true, pd.Series):
when_true = when_true.values
if isinstance(otherwise, pd.Series):
otherwise = otherwise.values
output = np.array([when_true[i] if c else otherwise[i]
for i, c in enumerate(condition)])
return output | [
"def",
"if_else",
"(",
"condition",
",",
"when_true",
",",
"otherwise",
")",
":",
"if",
"not",
"isinstance",
"(",
"when_true",
",",
"collections",
".",
"Iterable",
")",
"or",
"isinstance",
"(",
"when_true",
",",
"str",
")",
":",
"when_true",
"=",
"np",
"... | Wraps creation of a series based on if-else conditional logic into a function
call.
Provide a boolean vector condition, value(s) when true, and value(s)
when false, and a vector will be returned the same length as the conditional
vector according to the logical statement.
Args:
condition: A boolean vector representing the condition. This is often
a logical statement with a symbolic series.
when_true: A vector the same length as the condition vector or a single
value to apply when the condition is `True`.
otherwise: A vector the same length as the condition vector or a single
value to apply when the condition is `False`.
Example:
df = pd.DataFrame | [
"Wraps",
"creation",
"of",
"a",
"series",
"based",
"on",
"if",
"-",
"else",
"conditional",
"logic",
"into",
"a",
"function",
"call",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/vector.py#L197-L231 | train | 217,861 |
kieferk/dfply | dfply/vector.py | na_if | def na_if(series, *values):
"""
If values in a series match a specified value, change them to `np.nan`.
Args:
series: Series or vector, often symbolic.
*values: Value(s) to convert to `np.nan` in the series.
"""
series = pd.Series(series)
series[series.isin(values)] = np.nan
return series | python | def na_if(series, *values):
"""
If values in a series match a specified value, change them to `np.nan`.
Args:
series: Series or vector, often symbolic.
*values: Value(s) to convert to `np.nan` in the series.
"""
series = pd.Series(series)
series[series.isin(values)] = np.nan
return series | [
"def",
"na_if",
"(",
"series",
",",
"*",
"values",
")",
":",
"series",
"=",
"pd",
".",
"Series",
"(",
"series",
")",
"series",
"[",
"series",
".",
"isin",
"(",
"values",
")",
"]",
"=",
"np",
".",
"nan",
"return",
"series"
] | If values in a series match a specified value, change them to `np.nan`.
Args:
series: Series or vector, often symbolic.
*values: Value(s) to convert to `np.nan` in the series. | [
"If",
"values",
"in",
"a",
"series",
"match",
"a",
"specified",
"value",
"change",
"them",
"to",
"np",
".",
"nan",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/vector.py#L239-L250 | train | 217,862 |
kieferk/dfply | dfply/window_functions.py | between | def between(series, a, b, inclusive=False):
"""
Returns a boolean series specifying whether rows of the input series
are between values `a` and `b`.
Args:
series: column to compare, typically symbolic.
a: value series must be greater than (or equal to if `inclusive=True`)
for the output series to be `True` at that position.
b: value series must be less than (or equal to if `inclusive=True`) for
the output series to be `True` at that position.
Kwargs:
inclusive (bool): If `True`, comparison is done with `>=` and `<=`.
If `False` (the default), comparison uses `>` and `<`.
"""
if inclusive == True:
met_condition = (series >= a) & (series <= b)
elif inclusive == False:
met_condition = (series > a) & (series < b)
return met_condition | python | def between(series, a, b, inclusive=False):
"""
Returns a boolean series specifying whether rows of the input series
are between values `a` and `b`.
Args:
series: column to compare, typically symbolic.
a: value series must be greater than (or equal to if `inclusive=True`)
for the output series to be `True` at that position.
b: value series must be less than (or equal to if `inclusive=True`) for
the output series to be `True` at that position.
Kwargs:
inclusive (bool): If `True`, comparison is done with `>=` and `<=`.
If `False` (the default), comparison uses `>` and `<`.
"""
if inclusive == True:
met_condition = (series >= a) & (series <= b)
elif inclusive == False:
met_condition = (series > a) & (series < b)
return met_condition | [
"def",
"between",
"(",
"series",
",",
"a",
",",
"b",
",",
"inclusive",
"=",
"False",
")",
":",
"if",
"inclusive",
"==",
"True",
":",
"met_condition",
"=",
"(",
"series",
">=",
"a",
")",
"&",
"(",
"series",
"<=",
"b",
")",
"elif",
"inclusive",
"==",... | Returns a boolean series specifying whether rows of the input series
are between values `a` and `b`.
Args:
series: column to compare, typically symbolic.
a: value series must be greater than (or equal to if `inclusive=True`)
for the output series to be `True` at that position.
b: value series must be less than (or equal to if `inclusive=True`) for
the output series to be `True` at that position.
Kwargs:
inclusive (bool): If `True`, comparison is done with `>=` and `<=`.
If `False` (the default), comparison uses `>` and `<`. | [
"Returns",
"a",
"boolean",
"series",
"specifying",
"whether",
"rows",
"of",
"the",
"input",
"series",
"are",
"between",
"values",
"a",
"and",
"b",
"."
] | 6a858f066602735a90f8b6b85106bc39ceadc282 | https://github.com/kieferk/dfply/blob/6a858f066602735a90f8b6b85106bc39ceadc282/dfply/window_functions.py#L43-L64 | train | 217,863 |
euske/pdfminer | pdfminer/psparser.py | PSBaseParser.seek | def seek(self, pos):
"""Seeks the parser to the given position.
"""
if self.debug:
logging.debug('seek: %r' % pos)
self.fp.seek(pos)
# reset the status for nextline()
self.bufpos = pos
self.buf = b''
self.charpos = 0
# reset the status for nexttoken()
self._parse1 = self._parse_main
self._curtoken = b''
self._curtokenpos = 0
self._tokens = []
return | python | def seek(self, pos):
"""Seeks the parser to the given position.
"""
if self.debug:
logging.debug('seek: %r' % pos)
self.fp.seek(pos)
# reset the status for nextline()
self.bufpos = pos
self.buf = b''
self.charpos = 0
# reset the status for nexttoken()
self._parse1 = self._parse_main
self._curtoken = b''
self._curtokenpos = 0
self._tokens = []
return | [
"def",
"seek",
"(",
"self",
",",
"pos",
")",
":",
"if",
"self",
".",
"debug",
":",
"logging",
".",
"debug",
"(",
"'seek: %r'",
"%",
"pos",
")",
"self",
".",
"fp",
".",
"seek",
"(",
"pos",
")",
"# reset the status for nextline()",
"self",
".",
"bufpos",... | Seeks the parser to the given position. | [
"Seeks",
"the",
"parser",
"to",
"the",
"given",
"position",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L191-L206 | train | 217,864 |
euske/pdfminer | pdfminer/psparser.py | PSBaseParser.nextline | def nextline(self):
"""Fetches a next line that ends either with \\r or \\n.
"""
linebuf = b''
linepos = self.bufpos + self.charpos
eol = False
while 1:
self.fillbuf()
if eol:
c = self.buf[self.charpos]
# handle b'\r\n'
if c == b'\n':
linebuf += c
self.charpos += 1
break
m = EOL.search(self.buf, self.charpos)
if m:
linebuf += self.buf[self.charpos:m.end(0)]
self.charpos = m.end(0)
if linebuf[-1] == b'\r':
eol = True
else:
break
else:
linebuf += self.buf[self.charpos:]
self.charpos = len(self.buf)
if self.debug:
logging.debug('nextline: %r, %r' % (linepos, linebuf))
return (linepos, linebuf) | python | def nextline(self):
"""Fetches a next line that ends either with \\r or \\n.
"""
linebuf = b''
linepos = self.bufpos + self.charpos
eol = False
while 1:
self.fillbuf()
if eol:
c = self.buf[self.charpos]
# handle b'\r\n'
if c == b'\n':
linebuf += c
self.charpos += 1
break
m = EOL.search(self.buf, self.charpos)
if m:
linebuf += self.buf[self.charpos:m.end(0)]
self.charpos = m.end(0)
if linebuf[-1] == b'\r':
eol = True
else:
break
else:
linebuf += self.buf[self.charpos:]
self.charpos = len(self.buf)
if self.debug:
logging.debug('nextline: %r, %r' % (linepos, linebuf))
return (linepos, linebuf) | [
"def",
"nextline",
"(",
"self",
")",
":",
"linebuf",
"=",
"b''",
"linepos",
"=",
"self",
".",
"bufpos",
"+",
"self",
".",
"charpos",
"eol",
"=",
"False",
"while",
"1",
":",
"self",
".",
"fillbuf",
"(",
")",
"if",
"eol",
":",
"c",
"=",
"self",
"."... | Fetches a next line that ends either with \\r or \\n. | [
"Fetches",
"a",
"next",
"line",
"that",
"ends",
"either",
"with",
"\\\\",
"r",
"or",
"\\\\",
"n",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L219-L247 | train | 217,865 |
euske/pdfminer | pdfminer/psparser.py | PSBaseParser.revreadlines | def revreadlines(self):
"""Fetches a next line backward.
This is used to locate the trailers at the end of a file.
"""
self.fp.seek(0, 2)
pos = self.fp.tell()
buf = b''
while 0 < pos:
prevpos = pos
pos = max(0, pos-self.BUFSIZ)
self.fp.seek(pos)
s = self.fp.read(prevpos-pos)
if not s:
break
while 1:
n = max(s.rfind(b'\r'), s.rfind(b'\n'))
if n == -1:
buf = s + buf
break
yield s[n:]+buf
s = s[:n]
buf = b''
return | python | def revreadlines(self):
"""Fetches a next line backward.
This is used to locate the trailers at the end of a file.
"""
self.fp.seek(0, 2)
pos = self.fp.tell()
buf = b''
while 0 < pos:
prevpos = pos
pos = max(0, pos-self.BUFSIZ)
self.fp.seek(pos)
s = self.fp.read(prevpos-pos)
if not s:
break
while 1:
n = max(s.rfind(b'\r'), s.rfind(b'\n'))
if n == -1:
buf = s + buf
break
yield s[n:]+buf
s = s[:n]
buf = b''
return | [
"def",
"revreadlines",
"(",
"self",
")",
":",
"self",
".",
"fp",
".",
"seek",
"(",
"0",
",",
"2",
")",
"pos",
"=",
"self",
".",
"fp",
".",
"tell",
"(",
")",
"buf",
"=",
"b''",
"while",
"0",
"<",
"pos",
":",
"prevpos",
"=",
"pos",
"pos",
"=",
... | Fetches a next line backward.
This is used to locate the trailers at the end of a file. | [
"Fetches",
"a",
"next",
"line",
"backward",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L249-L272 | train | 217,866 |
euske/pdfminer | pdfminer/psparser.py | PSStackParser.nextobject | def nextobject(self):
"""Yields a list of objects.
Returns keywords, literals, strings, numbers, arrays and dictionaries.
Arrays and dictionaries are represented as Python lists and dictionaries.
"""
while not self.results:
(pos, token) = self.nexttoken()
#print (pos,token), (self.curtype, self.curstack)
if isinstance(token, (int, long, float, bool, str, PSLiteral)):
# normal token
self.push((pos, token))
elif token == KEYWORD_ARRAY_BEGIN:
# begin array
self.start_type(pos, 'a')
elif token == KEYWORD_ARRAY_END:
# end array
try:
self.push(self.end_type('a'))
except PSTypeError:
if STRICT:
raise
elif token == KEYWORD_DICT_BEGIN:
# begin dictionary
self.start_type(pos, 'd')
elif token == KEYWORD_DICT_END:
# end dictionary
try:
(pos, objs) = self.end_type('d')
if len(objs) % 2 != 0:
raise PSSyntaxError('Invalid dictionary construct: %r' % (objs,))
# construct a Python dictionary.
d = dict((literal_name(k), v) for (k, v) in choplist(2, objs) if v is not None)
self.push((pos, d))
except PSTypeError:
if STRICT:
raise
elif token == KEYWORD_PROC_BEGIN:
# begin proc
self.start_type(pos, 'p')
elif token == KEYWORD_PROC_END:
# end proc
try:
self.push(self.end_type('p'))
except PSTypeError:
if STRICT:
raise
else:
if self.debug:
logging.debug('do_keyword: pos=%r, token=%r, stack=%r' % \
(pos, token, self.curstack))
self.do_keyword(pos, token)
if self.context:
continue
else:
self.flush()
obj = self.results.pop(0)
if self.debug:
logging.debug('nextobject: %r' % (obj,))
return obj | python | def nextobject(self):
"""Yields a list of objects.
Returns keywords, literals, strings, numbers, arrays and dictionaries.
Arrays and dictionaries are represented as Python lists and dictionaries.
"""
while not self.results:
(pos, token) = self.nexttoken()
#print (pos,token), (self.curtype, self.curstack)
if isinstance(token, (int, long, float, bool, str, PSLiteral)):
# normal token
self.push((pos, token))
elif token == KEYWORD_ARRAY_BEGIN:
# begin array
self.start_type(pos, 'a')
elif token == KEYWORD_ARRAY_END:
# end array
try:
self.push(self.end_type('a'))
except PSTypeError:
if STRICT:
raise
elif token == KEYWORD_DICT_BEGIN:
# begin dictionary
self.start_type(pos, 'd')
elif token == KEYWORD_DICT_END:
# end dictionary
try:
(pos, objs) = self.end_type('d')
if len(objs) % 2 != 0:
raise PSSyntaxError('Invalid dictionary construct: %r' % (objs,))
# construct a Python dictionary.
d = dict((literal_name(k), v) for (k, v) in choplist(2, objs) if v is not None)
self.push((pos, d))
except PSTypeError:
if STRICT:
raise
elif token == KEYWORD_PROC_BEGIN:
# begin proc
self.start_type(pos, 'p')
elif token == KEYWORD_PROC_END:
# end proc
try:
self.push(self.end_type('p'))
except PSTypeError:
if STRICT:
raise
else:
if self.debug:
logging.debug('do_keyword: pos=%r, token=%r, stack=%r' % \
(pos, token, self.curstack))
self.do_keyword(pos, token)
if self.context:
continue
else:
self.flush()
obj = self.results.pop(0)
if self.debug:
logging.debug('nextobject: %r' % (obj,))
return obj | [
"def",
"nextobject",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"results",
":",
"(",
"pos",
",",
"token",
")",
"=",
"self",
".",
"nexttoken",
"(",
")",
"#print (pos,token), (self.curtype, self.curstack)",
"if",
"isinstance",
"(",
"token",
",",
"(",
... | Yields a list of objects.
Returns keywords, literals, strings, numbers, arrays and dictionaries.
Arrays and dictionaries are represented as Python lists and dictionaries. | [
"Yields",
"a",
"list",
"of",
"objects",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/psparser.py#L567-L626 | train | 217,867 |
euske/pdfminer | pdfminer/encodingdb.py | name2unicode | def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers."""
if name in glyphname2unicode:
return glyphname2unicode[name]
m = STRIP_NAME.search(name)
if not m:
raise KeyError(name)
return unichr(int(m.group(0))) | python | def name2unicode(name):
"""Converts Adobe glyph names to Unicode numbers."""
if name in glyphname2unicode:
return glyphname2unicode[name]
m = STRIP_NAME.search(name)
if not m:
raise KeyError(name)
return unichr(int(m.group(0))) | [
"def",
"name2unicode",
"(",
"name",
")",
":",
"if",
"name",
"in",
"glyphname2unicode",
":",
"return",
"glyphname2unicode",
"[",
"name",
"]",
"m",
"=",
"STRIP_NAME",
".",
"search",
"(",
"name",
")",
"if",
"not",
"m",
":",
"raise",
"KeyError",
"(",
"name",... | Converts Adobe glyph names to Unicode numbers. | [
"Converts",
"Adobe",
"glyph",
"names",
"to",
"Unicode",
"numbers",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/encodingdb.py#L13-L20 | train | 217,868 |
euske/pdfminer | pdfminer/pdftypes.py | resolve1 | def resolve1(x, default=None):
"""Resolves an object.
If this is an array or dictionary, it may still contains
some indirect objects inside.
"""
while isinstance(x, PDFObjRef):
x = x.resolve(default=default)
return x | python | def resolve1(x, default=None):
"""Resolves an object.
If this is an array or dictionary, it may still contains
some indirect objects inside.
"""
while isinstance(x, PDFObjRef):
x = x.resolve(default=default)
return x | [
"def",
"resolve1",
"(",
"x",
",",
"default",
"=",
"None",
")",
":",
"while",
"isinstance",
"(",
"x",
",",
"PDFObjRef",
")",
":",
"x",
"=",
"x",
".",
"resolve",
"(",
"default",
"=",
"default",
")",
"return",
"x"
] | Resolves an object.
If this is an array or dictionary, it may still contains
some indirect objects inside. | [
"Resolves",
"an",
"object",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdftypes.py#L73-L81 | train | 217,869 |
euske/pdfminer | pdfminer/pdftypes.py | resolve_all | def resolve_all(x, default=None):
"""Recursively resolves the given object and all the internals.
Make sure there is no indirect reference within the nested object.
This procedure might be slow.
"""
while isinstance(x, PDFObjRef):
x = x.resolve(default=default)
if isinstance(x, list):
x = [resolve_all(v, default=default) for v in x]
elif isinstance(x, dict):
for (k, v) in x.iteritems():
x[k] = resolve_all(v, default=default)
return x | python | def resolve_all(x, default=None):
"""Recursively resolves the given object and all the internals.
Make sure there is no indirect reference within the nested object.
This procedure might be slow.
"""
while isinstance(x, PDFObjRef):
x = x.resolve(default=default)
if isinstance(x, list):
x = [resolve_all(v, default=default) for v in x]
elif isinstance(x, dict):
for (k, v) in x.iteritems():
x[k] = resolve_all(v, default=default)
return x | [
"def",
"resolve_all",
"(",
"x",
",",
"default",
"=",
"None",
")",
":",
"while",
"isinstance",
"(",
"x",
",",
"PDFObjRef",
")",
":",
"x",
"=",
"x",
".",
"resolve",
"(",
"default",
"=",
"default",
")",
"if",
"isinstance",
"(",
"x",
",",
"list",
")",
... | Recursively resolves the given object and all the internals.
Make sure there is no indirect reference within the nested object.
This procedure might be slow. | [
"Recursively",
"resolves",
"the",
"given",
"object",
"and",
"all",
"the",
"internals",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdftypes.py#L84-L97 | train | 217,870 |
euske/pdfminer | pdfminer/pdftypes.py | decipher_all | def decipher_all(decipher, objid, genno, x):
"""Recursively deciphers the given object.
"""
if isinstance(x, str):
return decipher(objid, genno, x)
if isinstance(x, list):
x = [decipher_all(decipher, objid, genno, v) for v in x]
elif isinstance(x, dict):
for (k, v) in x.iteritems():
x[k] = decipher_all(decipher, objid, genno, v)
return x | python | def decipher_all(decipher, objid, genno, x):
"""Recursively deciphers the given object.
"""
if isinstance(x, str):
return decipher(objid, genno, x)
if isinstance(x, list):
x = [decipher_all(decipher, objid, genno, v) for v in x]
elif isinstance(x, dict):
for (k, v) in x.iteritems():
x[k] = decipher_all(decipher, objid, genno, v)
return x | [
"def",
"decipher_all",
"(",
"decipher",
",",
"objid",
",",
"genno",
",",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"return",
"decipher",
"(",
"objid",
",",
"genno",
",",
"x",
")",
"if",
"isinstance",
"(",
"x",
",",
"list",
... | Recursively deciphers the given object. | [
"Recursively",
"deciphers",
"the",
"given",
"object",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdftypes.py#L100-L110 | train | 217,871 |
def find_xref(self, parser):
    """Internal function used to locate the first XRef.

    Scans the file backwards for the 'startxref' keyword; the last
    non-empty line seen before it (i.e. the line after it in file order)
    holds the byte offset of the xref table.

    :param parser: parser exposing revreadlines() over the document.
    :returns: the xref position as an integer.
    :raises PDFNoValidXRef: if 'startxref' is missing or not followed by
        a position line.
    """
    prev = None
    for line in parser.revreadlines():
        line = line.strip()
        if self.debug:
            logging.debug('find_xref: %r' % line)
        if line == b'startxref':
            break
        if line:
            prev = line
    else:
        # Loop exhausted without seeing 'startxref'.
        raise PDFNoValidXRef('Unexpected EOF')
    if prev is None:
        # 'startxref' was the very last token: no offset follows it.
        raise PDFNoValidXRef('startxref not followed by a position')
    if self.debug:
        logging.info('xref found: pos=%r' % prev)
    # int() accepts bytes on py3 and promotes automatically on py2
    # (the original long() does not exist on py3).
    return int(prev)
"def",
"find_xref",
"(",
"self",
",",
"parser",
")",
":",
"# search the last xref table by scanning the file backwards.",
"prev",
"=",
"None",
"for",
"line",
"in",
"parser",
".",
"revreadlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",... | Internal function used to locate the first XRef. | [
"Internal",
"function",
"used",
"to",
"locate",
"the",
"first",
"XRef",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdfdocument.py#L755-L771 | train | 217,872 |
def read_xref_from(self, parser, start, xrefs):
    """Reads XRefs from the given location.

    Parses the xref table or cross-reference stream at byte offset
    *start*, appends the resulting object to *xrefs* (mutated in place),
    and recursively follows the trailer's 'XRefStm' (hybrid-reference
    files) and 'Prev' (incremental updates) pointers.

    :param parser: a PDFParser positioned over the document data.
    :param start: byte offset of the xref table or xref stream.
    :param xrefs: list collecting parsed PDFXRef/PDFXRefStream objects.
    :raises PDFNoValidXRef: if EOF is hit before any token can be read.
    """
    parser.seek(start)
    parser.reset()
    try:
        (pos, token) = parser.nexttoken()
    except PSEOF:
        raise PDFNoValidXRef('Unexpected EOF')
    if self.debug:
        logging.info('read_xref_from: start=%d, token=%r' % (start, token))
    if isinstance(token, int):
        # An integer here is an object number, so this must be a
        # cross-reference stream (PDF 1.5+); rewind and parse it as one.
        # XRefStream: PDF-1.5
        parser.seek(pos)
        parser.reset()
        xref = PDFXRefStream()
        xref.load(parser)
    else:
        if token is parser.KEYWORD_XREF:
            # Classic table: skip the rest of the 'xref' keyword line.
            parser.nextline()
        xref = PDFXRef()
        xref.load(parser)
    xrefs.append(xref)
    trailer = xref.get_trailer()
    if self.debug:
        logging.info('trailer: %r' % trailer)
    if 'XRefStm' in trailer:
        # Hybrid-reference file: also read the companion xref stream.
        pos = int_value(trailer['XRefStm'])
        self.read_xref_from(parser, pos, xrefs)
    if 'Prev' in trailer:
        # find previous xref (earlier incremental update)
        pos = int_value(trailer['Prev'])
        self.read_xref_from(parser, pos, xrefs)
    return
"def",
"read_xref_from",
"(",
"self",
",",
"parser",
",",
"start",
",",
"xrefs",
")",
":",
"parser",
".",
"seek",
"(",
"start",
")",
"parser",
".",
"reset",
"(",
")",
"try",
":",
"(",
"pos",
",",
"token",
")",
"=",
"parser",
".",
"nexttoken",
"(",
... | Reads XRefs from the given location. | [
"Reads",
"XRefs",
"from",
"the",
"given",
"location",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdfdocument.py#L774-L806 | train | 217,873 |
def mult_matrix(m1, m0):
    """Returns the multiplication of two matrices.

    Matrices are PDF-style affine transforms given as 6-tuples
    (a, b, c, d, e, f); the result applies *m1* first, then *m0*.

    Note: in the original code this string sat *after* the unpacking
    statements, making it a no-op expression rather than a docstring;
    it now documents the function properly.
    """
    (a1, b1, c1, d1, e1, f1) = m1
    (a0, b0, c0, d0, e0, f0) = m0
    return (a0*a1+c0*b1, b0*a1+d0*b1,
            a0*c1+c0*d1, b0*c1+d0*d1,
            a0*e1+c0*f1+e0, b0*e1+d0*f1+f0)
"def",
"mult_matrix",
"(",
"m1",
",",
"m0",
")",
":",
"(",
"a1",
",",
"b1",
",",
"c1",
",",
"d1",
",",
"e1",
",",
"f1",
")",
"=",
"m1",
"(",
"a0",
",",
"b0",
",",
"c0",
",",
"d0",
",",
"e0",
",",
"f0",
")",
"=",
"m0",
"return",
"(",
"a0... | Returns the multiplication of two matrices. | [
"Returns",
"the",
"multiplication",
"of",
"two",
"matrices",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L57-L63 | train | 217,874 |
def uniq(objs):
    """Eliminates duplicated elements.

    Lazily yields each hashable element of *objs* the first time it is
    seen, preserving the original order.
    """
    seen = set()
    for item in objs:
        if item not in seen:
            seen.add(item)
            yield item
    return
"def",
"uniq",
"(",
"objs",
")",
":",
"done",
"=",
"set",
"(",
")",
"for",
"obj",
"in",
"objs",
":",
"if",
"obj",
"in",
"done",
":",
"continue",
"done",
".",
"add",
"(",
"obj",
")",
"yield",
"obj",
"return"
] | Eliminates duplicated elements. | [
"Eliminates",
"duplicated",
"elements",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L95-L103 | train | 217,875 |
def csort(objs, key):
    """Order-preserving sorting function.

    Sorts *objs* by *key*, breaking ties by each element's original
    position so equal-keyed elements keep their input order.
    """
    position = {obj: index for index, obj in enumerate(objs)}
    return sorted(objs, key=lambda obj: (key(obj), position[obj]))
"def",
"csort",
"(",
"objs",
",",
"key",
")",
":",
"idxs",
"=",
"dict",
"(",
"(",
"obj",
",",
"i",
")",
"for",
"(",
"i",
",",
"obj",
")",
"in",
"enumerate",
"(",
"objs",
")",
")",
"return",
"sorted",
"(",
"objs",
",",
"key",
"=",
"lambda",
"o... | Order-preserving sorting function. | [
"Order",
"-",
"preserving",
"sorting",
"function",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L107-L110 | train | 217,876 |
def fsplit(pred, objs):
    """Split a list into two classes according to the predicate.

    Returns a (matched, unmatched) pair of lists; each preserves the
    relative order of its elements in *objs*.
    """
    matched = []
    unmatched = []
    for item in objs:
        (matched if pred(item) else unmatched).append(item)
    return (matched, unmatched)
"def",
"fsplit",
"(",
"pred",
",",
"objs",
")",
":",
"t",
"=",
"[",
"]",
"f",
"=",
"[",
"]",
"for",
"obj",
"in",
"objs",
":",
"if",
"pred",
"(",
"obj",
")",
":",
"t",
".",
"append",
"(",
"obj",
")",
"else",
":",
"f",
".",
"append",
"(",
"... | Split a list into two classes according to the predicate. | [
"Split",
"a",
"list",
"into",
"two",
"classes",
"according",
"to",
"the",
"predicate",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L114-L123 | train | 217,877 |
def drange(v0, v1, d):
    """Returns a discrete range.

    Maps the continuous interval [v0, v1) onto bucket indices of width
    *d*, i.e. range(int(v0)//d, int(v1+d)//d).

    :raises AssertionError: if v0 >= v1.
    """
    assert v0 < v1
    # range() replaces the py2-only xrange(); iteration semantics match.
    return range(int(v0) // d, int(v1 + d) // d)
"def",
"drange",
"(",
"v0",
",",
"v1",
",",
"d",
")",
":",
"assert",
"v0",
"<",
"v1",
"return",
"xrange",
"(",
"int",
"(",
"v0",
")",
"//",
"d",
",",
"int",
"(",
"v1",
"+",
"d",
")",
"//",
"d",
")"
] | Returns a discrete range. | [
"Returns",
"a",
"discrete",
"range",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L127-L130 | train | 217,878 |
def get_bound(pts):
    """Compute a minimal rectangle that covers all the points.

    Returns (x0, y0, x1, y1); for an empty sequence the INF sentinels
    are returned unchanged, i.e. (INF, INF, -INF, -INF).
    """
    left = bottom = INF
    right = top = -INF
    for (px, py) in pts:
        left = min(left, px)
        bottom = min(bottom, py)
        right = max(right, px)
        top = max(top, py)
    return (left, bottom, right, top)
"def",
"get_bound",
"(",
"pts",
")",
":",
"(",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
")",
"=",
"(",
"INF",
",",
"INF",
",",
"-",
"INF",
",",
"-",
"INF",
")",
"for",
"(",
"x",
",",
"y",
")",
"in",
"pts",
":",
"x0",
"=",
"min",
"(",
"x0"... | Compute a minimal rectangle that covers all the points. | [
"Compute",
"a",
"minimal",
"rectangle",
"that",
"covers",
"all",
"the",
"points",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L134-L142 | train | 217,879 |
def choplist(n, seq):
    """Groups every n elements of the list.

    Lazily yields consecutive n-tuples from *seq*; a trailing partial
    group (fewer than n elements) is silently dropped.
    """
    chunk = []
    for element in seq:
        chunk.append(element)
        if len(chunk) == n:
            yield tuple(chunk)
            chunk = []
    return
"def",
"choplist",
"(",
"n",
",",
"seq",
")",
":",
"r",
"=",
"[",
"]",
"for",
"x",
"in",
"seq",
":",
"r",
".",
"append",
"(",
"x",
")",
"if",
"len",
"(",
"r",
")",
"==",
"n",
":",
"yield",
"tuple",
"(",
"r",
")",
"r",
"=",
"[",
"]",
"re... | Groups every n elements of the list. | [
"Groups",
"every",
"n",
"elements",
"of",
"the",
"list",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L157-L165 | train | 217,880 |
def decode_text(s):
    """Decodes a PDFDocEncoding string to Unicode.

    A leading UTF-16BE byte-order mark selects UTF-16 decoding;
    otherwise each byte is mapped through the PDFDocEncoding table.

    :param s: encoded byte string.
    :returns: the decoded text.
    """
    if s.startswith(b'\xfe\xff'):
        # bytes.decode() works on both py2 and py3; the original
        # unicode(...) builtin exists only on py2.
        return s[2:].decode('utf-16be', 'ignore')
    else:
        return ''.join(PDFDocEncoding[ord(c)] for c in s)
"def",
"decode_text",
"(",
"s",
")",
":",
"if",
"s",
".",
"startswith",
"(",
"b'\\xfe\\xff'",
")",
":",
"return",
"unicode",
"(",
"s",
"[",
"2",
":",
"]",
",",
"'utf-16be'",
",",
"'ignore'",
")",
"else",
":",
"return",
"''",
".",
"join",
"(",
"PDFD... | Decodes a PDFDocEncoding string to Unicode. | [
"Decodes",
"a",
"PDFDocEncoding",
"string",
"to",
"Unicode",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/utils.py#L223-L228 | train | 217,881 |
def do_keyword(self, pos, token):
    """Handles PDF-related keywords.

    Parser callback invoked for each keyword token.  It turns keywords
    into objects on the parse stack:

    - xref/startxref: flush the pending object as a parse result.
    - endobj: flush the (objid, genno, 'obj', object) quadruple.
    - null: push None.
    - R: pop (objid, genno) and push a PDFObjRef.
    - stream: read the raw payload following the stream dictionary and
      push a PDFStream.
    - anything else: push the keyword token itself.
    """
    if token in (self.KEYWORD_XREF, self.KEYWORD_STARTXREF):
        self.add_results(*self.pop(1))
    elif token is self.KEYWORD_ENDOBJ:
        self.add_results(*self.pop(4))
    elif token is self.KEYWORD_NULL:
        # null object
        self.push((pos, None))
    elif token is self.KEYWORD_R:
        # reference to indirect object
        try:
            ((_, objid), (_, genno)) = self.pop(2)
            (objid, genno) = (int(objid), int(genno))
            obj = PDFObjRef(self.doc, objid, genno)
            self.push((pos, obj))
        except PSSyntaxError:
            pass
    elif token is self.KEYWORD_STREAM:
        # stream object
        ((_, dic),) = self.pop(1)
        dic = dict_value(dic)
        objlen = 0
        if not self.fallback:
            # Trust the declared /Length except in fallback (recovery)
            # mode, where the stream end is found by scanning instead.
            try:
                objlen = int_value(dic['Length'])
            except KeyError:
                if STRICT:
                    raise PDFSyntaxError('/Length is undefined: %r' % dic)
        self.seek(pos)
        try:
            (_, line) = self.nextline()    # 'stream'
        except PSEOF:
            if STRICT:
                raise PDFSyntaxError('Unexpected EOF')
            return
        pos += len(line)
        self.fp.seek(pos)
        # Read the declared payload, then scan forward line by line for
        # the 'endstream' keyword, extending objlen (and, in fallback
        # mode, the data itself) past an inaccurate /Length.
        data = self.fp.read(objlen)
        self.seek(pos+objlen)
        while 1:
            try:
                (linepos, line) = self.nextline()
            except PSEOF:
                if STRICT:
                    raise PDFSyntaxError('Unexpected EOF')
                break
            if b'endstream' in line:
                i = line.index(b'endstream')
                objlen += i
                if self.fallback:
                    data += line[:i]
                break
            objlen += len(line)
            if self.fallback:
                data += line
        self.seek(pos+objlen)
        # XXX limit objlen not to exceed object boundary
        if self.debug:
            logging.debug('Stream: pos=%d, objlen=%d, dic=%r, data=%r...' % \
                          (pos, objlen, dic, data[:10]))
        obj = PDFStream(dic, data, self.doc.decipher)
        self.push((pos, obj))
    else:
        # others
        self.push((pos, token))
    return
"def",
"do_keyword",
"(",
"self",
",",
"pos",
",",
"token",
")",
":",
"if",
"token",
"in",
"(",
"self",
".",
"KEYWORD_XREF",
",",
"self",
".",
"KEYWORD_STARTXREF",
")",
":",
"self",
".",
"add_results",
"(",
"*",
"self",
".",
"pop",
"(",
"1",
")",
"... | Handles PDF-related keywords. | [
"Handles",
"PDF",
"-",
"related",
"keywords",
"."
] | 8150458718e9024c80b00e74965510b20206e588 | https://github.com/euske/pdfminer/blob/8150458718e9024c80b00e74965510b20206e588/pdfminer/pdfparser.py#L60-L133 | train | 217,882 |
def generate_help_text():
    """Return a formatted string listing commands, HTTPie options, and HTTP
    actions.
    """
    sections = [
        ('Commands', ROOT_COMMANDS.items()),
        ('Options', OPTION_NAMES.items()),
        ('Actions', ACTIONS.items()),
        ('Headers', HEADER_NAMES.items()),
    ]
    parts = []
    for title, entries in sections:
        lines = ['{0}:\n'.format(title)]
        for name, description in entries:
            lines.append('\t{0:<10}\t{1:<20}\n'.format(name, description))
        # Blank line after each section, matching the original layout.
        parts.append(''.join(lines) + '\n')
    return ''.join(parts)
"def",
"generate_help_text",
"(",
")",
":",
"def",
"generate_cmds_with_explanations",
"(",
"summary",
",",
"cmds",
")",
":",
"text",
"=",
"'{0}:\\n'",
".",
"format",
"(",
"summary",
")",
"for",
"cmd",
",",
"explanation",
"in",
"cmds",
":",
"text",
"+=",
"'... | Return a formatted string listing commands, HTTPie options, and HTTP
actions. | [
"Return",
"a",
"formatted",
"string",
"listing",
"commands",
"HTTPie",
"options",
"and",
"HTTP",
"actions",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/execution.py#L154-L168 | train | 217,883 |
def colformat(strings, num_sep_spaces=1, terminal_width=None):
    """Format a list of strings like ls does multi-column output.

    Lazily yields one output line at a time.  Items are laid out
    column-major (down the first column, then the next); every column
    but the last is padded to the longest item's width.  ANSI escape
    sequences are stripped before measuring so colored strings align.

    :param strings: list of (possibly ANSI-colored) strings to lay out.
    :param num_sep_spaces: number of spaces between adjacent columns.
    :param terminal_width: available width in characters; auto-detected
        from the terminal when None.
    """
    if terminal_width is None:
        terminal_width = get_terminal_size().columns
    if not strings:
        return
    num_items = len(strings)
    max_len = max([len(strip_ansi_escapes(s)) for s in strings])
    # Widest layout that fits; clamp to [1, num_items] columns.
    num_columns = min(
        int((terminal_width + num_sep_spaces) / (max_len + num_sep_spaces)),
        num_items)
    num_columns = max(1, num_columns)
    num_lines = int(math.ceil(float(num_items) / num_columns))
    # Recompute columns from the line count so no column is fully empty.
    num_columns = int(math.ceil(float(num_items) / num_lines))
    num_elements_last_column = num_items % num_lines
    if num_elements_last_column == 0:
        num_elements_last_column = num_lines
    # Pre-allocate slots; lines below the last column's height get one
    # fewer slot because the final column ends early.
    lines = []
    for i in range(num_lines):
        line_size = num_columns
        if i >= num_elements_last_column:
            line_size -= 1
        lines.append([None] * line_size)
    # Fill column-major: item at index i + num_lines * j lands in line i,
    # column j.
    for i, line in enumerate(lines):
        line_size = len(line)
        for j in range(line_size):
            k = i + num_lines * j
            item = strings[k]
            if j % line_size != line_size - 1:
                # Pad every column except the last on this line, using
                # the escape-stripped length so ANSI codes don't skew it.
                item_len = len(strip_ansi_escapes(item))
                item = item + ' ' * (max_len - item_len)
            line[j] = item
    sep = ' ' * num_sep_spaces
    for line in lines:
        yield sep.join(line)
"def",
"colformat",
"(",
"strings",
",",
"num_sep_spaces",
"=",
"1",
",",
"terminal_width",
"=",
"None",
")",
":",
"if",
"terminal_width",
"is",
"None",
":",
"terminal_width",
"=",
"get_terminal_size",
"(",
")",
".",
"columns",
"if",
"not",
"strings",
":",
... | Format a list of strings like ls does multi-column output. | [
"Format",
"a",
"list",
"of",
"strings",
"like",
"ls",
"does",
"multi",
"-",
"column",
"output",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/utils.py#L47-L89 | train | 217,884 |
def load_context(context, file_path=None):
    """Load a Context object in place from user data directory.

    Each line of the saved file is replayed through execute() against
    *context*.  Missing files are silently ignored.
    """
    path = file_path if file_path else _get_context_filepath()
    if not os.path.exists(path):
        return
    with io.open(path, encoding='utf-8') as handle:
        for command in handle:
            execute(command, context)
"def",
"load_context",
"(",
"context",
",",
"file_path",
"=",
"None",
")",
":",
"if",
"not",
"file_path",
":",
"file_path",
"=",
"_get_context_filepath",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"with",
"io",
".",
"... | Load a Context object in place from user data directory. | [
"Load",
"a",
"Context",
"object",
"in",
"place",
"from",
"user",
"data",
"directory",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/contextio.py#L23-L30 | train | 217,885 |
def save_context(context):
    """Save a Context object to user data directory.

    Serializes *context* as HTTP Prompt commands (minus the excluded
    options) and writes them to the per-user context file.
    """
    content = format_to_http_prompt(context, excluded_options=EXCLUDED_OPTIONS)
    target_path = _get_context_filepath()
    with io.open(target_path, 'w', encoding='utf-8') as handle:
        handle.write(content)
"def",
"save_context",
"(",
"context",
")",
":",
"file_path",
"=",
"_get_context_filepath",
"(",
")",
"content",
"=",
"format_to_http_prompt",
"(",
"context",
",",
"excluded_options",
"=",
"EXCLUDED_OPTIONS",
")",
"with",
"io",
".",
"open",
"(",
"file_path",
","... | Save a Context object to user data directory. | [
"Save",
"a",
"Context",
"object",
"to",
"user",
"data",
"directory",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/contextio.py#L33-L38 | train | 217,886 |
def extract_args_for_httpie_main(context, method=None):
    """Transform a Context object to a list of arguments that can be passed to
    HTTPie main function.
    """
    args = list(_extract_httpie_options(context))
    if method:
        args.append(method.upper())
    args.append(context.url)
    args.extend(_extract_httpie_request_items(context))
    return args
"def",
"extract_args_for_httpie_main",
"(",
"context",
",",
"method",
"=",
"None",
")",
":",
"args",
"=",
"_extract_httpie_options",
"(",
"context",
")",
"if",
"method",
":",
"args",
".",
"append",
"(",
"method",
".",
"upper",
"(",
")",
")",
"args",
".",
... | Transform a Context object to a list of arguments that can be passed to
HTTPie main function. | [
"Transform",
"a",
"Context",
"object",
"to",
"a",
"list",
"of",
"arguments",
"that",
"can",
"be",
"passed",
"to",
"HTTPie",
"main",
"function",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/context/transform.py#L73-L84 | train | 217,887 |
def format_to_httpie(context, method=None):
    """Format a Context object to an HTTPie command."""
    parts = ['http']
    parts.extend(_extract_httpie_options(context, quote=True,
                                         join_key_value=True))
    if method:
        parts.append(method.upper())
    parts.append(context.url)
    parts.extend(_extract_httpie_request_items(context, quote=True))
    return ' '.join(parts) + '\n'
"def",
"format_to_httpie",
"(",
"context",
",",
"method",
"=",
"None",
")",
":",
"cmd",
"=",
"[",
"'http'",
"]",
"+",
"_extract_httpie_options",
"(",
"context",
",",
"quote",
"=",
"True",
",",
"join_key_value",
"=",
"True",
")",
"if",
"method",
":",
"cmd... | Format a Context object to an HTTPie command. | [
"Format",
"a",
"Context",
"object",
"to",
"an",
"HTTPie",
"command",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/context/transform.py#L97-L105 | train | 217,888 |
def format_to_http_prompt(context, excluded_options=None):
    """Format a Context object to HTTP Prompt commands."""
    commands = list(_extract_httpie_options(context, quote=True,
                                            join_key_value=True,
                                            excluded_keys=excluded_options))
    commands.append('cd ' + smart_quote(context.url))
    commands.extend(_extract_httpie_request_items(context, quote=True))
    return '\n'.join(commands) + '\n'
"def",
"format_to_http_prompt",
"(",
"context",
",",
"excluded_options",
"=",
"None",
")",
":",
"cmds",
"=",
"_extract_httpie_options",
"(",
"context",
",",
"quote",
"=",
"True",
",",
"join_key_value",
"=",
"True",
",",
"excluded_keys",
"=",
"excluded_options",
... | Format a Context object to HTTP Prompt commands. | [
"Format",
"a",
"Context",
"object",
"to",
"HTTP",
"Prompt",
"commands",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/context/transform.py#L108-L114 | train | 217,889 |
def initialize():
    """Initialize a default config file if it doesn't exist yet.
    Returns:
        tuple: A tuple of (copied, dst_path). `copied` is a bool indicating if
            this function created the default config file. `dst_path` is the
            path of the user config file.
    """
    dst_path = get_user_config_path()
    # Never overwrite an existing user config.
    if os.path.exists(dst_path):
        return False, dst_path
    src_path = os.path.join(os.path.dirname(__file__), 'defaultconfig.py')
    shutil.copyfile(src_path, dst_path)
    return True, dst_path
"def",
"initialize",
"(",
")",
":",
"dst_path",
"=",
"get_user_config_path",
"(",
")",
"copied",
"=",
"False",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dst_path",
")",
":",
"src_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
... | Initialize a default config file if it doesn't exist yet.
Returns:
tuple: A tuple of (copied, dst_path). `copied` is a bool indicating if
this function created the default config file. `dst_path` is the
path of the user config file. | [
"Initialize",
"a",
"default",
"config",
"file",
"if",
"it",
"doesn",
"t",
"exist",
"yet",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/config.py#L17-L31 | train | 217,890 |
def load_user():
    """Read user config file and return it as a dict.

    The config file is executed as Python; every resulting name that
    starts with an underscore (including the ``__builtins__`` entry that
    exec() injects) is stripped from the returned dict.
    """
    config_path = get_user_config_path()
    config = {}
    # TODO: This may be overkill and too slow just for reading a config file
    with open(config_path) as f:
        code = compile(f.read(), config_path, 'exec')
        exec(code, config)
    # list(config) snapshots the keys before deletion; it replaces the
    # six.iterkeys() call and works identically on Python 2 and 3.
    for k in list(config):
        if k.startswith('_'):
            del config[k]
    return config
"def",
"load_user",
"(",
")",
":",
"config_path",
"=",
"get_user_config_path",
"(",
")",
"config",
"=",
"{",
"}",
"# TODO: This may be overkill and too slow just for reading a config file",
"with",
"open",
"(",
"config_path",
")",
"as",
"f",
":",
"code",
"=",
"compi... | Read user config file and return it as a dict. | [
"Read",
"user",
"config",
"file",
"and",
"return",
"it",
"as",
"a",
"dict",
"."
] | 189321f25e3526fa1b79a9dc38c317892c478986 | https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/config.py#L48-L63 | train | 217,891 |
def filter_single_reads_by_length(in_file, quality_format, min_length=20,
                                  out_file=None):
    """
    removes reads from a fastq file which are shorter than a minimum
    length

    in_file: path of the input fastq file
    quality_format: Biopython/SeqIO format name for the fastq variant
    min_length: length threshold in bases
    out_file: path the filtered fastq is written to and returned
        (NOTE(review): no default path is derived here, so passing None
        will fail at file_transaction/open -- confirm callers always
        supply it)
    """
    logger.info("Removing reads in %s thare are less than %d bases."
                % (in_file, min_length))
    in_iterator = SeqIO.parse(in_file, quality_format)
    # NOTE(review): strict '>' keeps only reads LONGER than min_length,
    # so reads of exactly min_length are dropped even though the
    # docstring says only shorter reads are removed -- confirm intended.
    out_iterator = (record for record in in_iterator if
                    len(record.seq) > min_length)
    # Write through a temp file so a crash never leaves partial output.
    with file_transaction(out_file) as tmp_out_file:
        with open(tmp_out_file, "w") as out_handle:
            SeqIO.write(out_iterator, out_handle, quality_format)
    return out_file
"def",
"filter_single_reads_by_length",
"(",
"in_file",
",",
"quality_format",
",",
"min_length",
"=",
"20",
",",
"out_file",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"Removing reads in %s thare are less than %d bases.\"",
"%",
"(",
"in_file",
",",
"min_... | removes reads from a fastq file which are shorter than a minimum
length | [
"removes",
"reads",
"from",
"a",
"fastq",
"file",
"which",
"are",
"shorter",
"than",
"a",
"minimum",
"length"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L40-L55 | train | 217,892 |
def filter_reads_by_length(fq1, fq2, quality_format, min_length=20):
    """
    removes reads from a pair of fastq files that are shorter than
    a minimum length. removes both ends of a read if one end falls
    below the threshold while maintaining the order of the reads

    fq1, fq2: paths to the paired fastq files (records must be in sync)
    quality_format: Biopython/SeqIO format name for the fastq variant
    min_length: length threshold in bases

    Returns [fq1_out, fq2_out]; orphaned mates (only one end long
    enough) go to the companion ".singles" files.
    """
    logger.info("Removing reads in %s and %s that "
                "are less than %d bases." % (fq1, fq2, min_length))
    fq1_out = utils.append_stem(fq1, ".fixed")
    fq2_out = utils.append_stem(fq2, ".fixed")
    fq1_single = utils.append_stem(fq1, ".singles")
    fq2_single = utils.append_stem(fq2, ".singles")
    # Bug fix: the existence shortcut previously listed fq2_single twice
    # and never checked fq1_single.
    if all(map(utils.file_exists, [fq1_out, fq2_out, fq1_single, fq2_single])):
        return [fq1_out, fq2_out]
    fq1_in = SeqIO.parse(fq1, quality_format)
    fq2_in = SeqIO.parse(fq2, quality_format)
    out_files = [fq1_out, fq2_out, fq1_single, fq2_single]
    # Write all four outputs through temp files so partial runs are safe.
    with file_transaction(out_files) as tmp_out_files:
        fq1_out_handle = open(tmp_out_files[0], "w")
        fq2_out_handle = open(tmp_out_files[1], "w")
        fq1_single_handle = open(tmp_out_files[2], "w")
        fq2_single_handle = open(tmp_out_files[3], "w")
        for fq1_record, fq2_record in zip(fq1_in, fq2_in):
            # Keep the pair only when BOTH mates reach min_length.
            if len(fq1_record.seq) >= min_length and len(fq2_record.seq) >= min_length:
                fq1_out_handle.write(fq1_record.format(quality_format))
                fq2_out_handle.write(fq2_record.format(quality_format))
            else:
                # NOTE(review): singles use strict '>' while pairs use
                # '>=', so a lone mate of exactly min_length is dropped
                # -- confirm the boundary difference is intended.
                if len(fq1_record.seq) > min_length:
                    fq1_single_handle.write(fq1_record.format(quality_format))
                if len(fq2_record.seq) > min_length:
                    fq2_single_handle.write(fq2_record.format(quality_format))
        fq1_out_handle.close()
        fq2_out_handle.close()
        fq1_single_handle.close()
        fq2_single_handle.close()
    return [fq1_out, fq2_out]
"""
removes reads from a pair of fastq files that are shorter than
a minimum length. removes both ends of a read if one end falls
below the threshold while maintaining the order of the reads
"""
logger.info("Removing reads in %s and %s that "
"are less than %d bases." % (fq1, fq2, min_length))
fq1_out = utils.append_stem(fq1, ".fixed")
fq2_out = utils.append_stem(fq2, ".fixed")
fq1_single = utils.append_stem(fq1, ".singles")
fq2_single = utils.append_stem(fq2, ".singles")
if all(map(utils.file_exists, [fq1_out, fq2_out, fq2_single, fq2_single])):
return [fq1_out, fq2_out]
fq1_in = SeqIO.parse(fq1, quality_format)
fq2_in = SeqIO.parse(fq2, quality_format)
out_files = [fq1_out, fq2_out, fq1_single, fq2_single]
with file_transaction(out_files) as tmp_out_files:
fq1_out_handle = open(tmp_out_files[0], "w")
fq2_out_handle = open(tmp_out_files[1], "w")
fq1_single_handle = open(tmp_out_files[2], "w")
fq2_single_handle = open(tmp_out_files[3], "w")
for fq1_record, fq2_record in zip(fq1_in, fq2_in):
if len(fq1_record.seq) >= min_length and len(fq2_record.seq) >= min_length:
fq1_out_handle.write(fq1_record.format(quality_format))
fq2_out_handle.write(fq2_record.format(quality_format))
else:
if len(fq1_record.seq) > min_length:
fq1_single_handle.write(fq1_record.format(quality_format))
if len(fq2_record.seq) > min_length:
fq2_single_handle.write(fq2_record.format(quality_format))
fq1_out_handle.close()
fq2_out_handle.close()
fq1_single_handle.close()
fq2_single_handle.close()
return [fq1_out, fq2_out] | [
"def",
"filter_reads_by_length",
"(",
"fq1",
",",
"fq2",
",",
"quality_format",
",",
"min_length",
"=",
"20",
")",
":",
"logger",
".",
"info",
"(",
"\"Removing reads in %s and %s that \"",
"\"are less than %d bases.\"",
"%",
"(",
"fq1",
",",
"fq2",
",",
"min_lengt... | removes reads from a pair of fastq files that are shorter than
a minimum length. removes both ends of a read if one end falls
below the threshold while maintaining the order of the reads | [
"removes",
"reads",
"from",
"a",
"pair",
"of",
"fastq",
"files",
"that",
"are",
"shorter",
"than",
"a",
"minimum",
"length",
".",
"removes",
"both",
"ends",
"of",
"a",
"read",
"if",
"one",
"end",
"falls",
"below",
"the",
"threshold",
"while",
"maintaining"... | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L57-L99 | train | 217,893 |
bcbio/bcbio-nextgen | bcbio/bam/fastq.py | rstrip_extra | def rstrip_extra(fname):
"""Strip extraneous, non-discriminative filename info from the end of a file.
"""
to_strip = ("_R", ".R", "-R", "_", "fastq", ".", "-")
while fname.endswith(to_strip):
for x in to_strip:
if fname.endswith(x):
fname = fname[:len(fname) - len(x)]
break
return fname | python | def rstrip_extra(fname):
"""Strip extraneous, non-discriminative filename info from the end of a file.
"""
to_strip = ("_R", ".R", "-R", "_", "fastq", ".", "-")
while fname.endswith(to_strip):
for x in to_strip:
if fname.endswith(x):
fname = fname[:len(fname) - len(x)]
break
return fname | [
"def",
"rstrip_extra",
"(",
"fname",
")",
":",
"to_strip",
"=",
"(",
"\"_R\"",
",",
"\".R\"",
",",
"\"-R\"",
",",
"\"_\"",
",",
"\"fastq\"",
",",
"\".\"",
",",
"\"-\"",
")",
"while",
"fname",
".",
"endswith",
"(",
"to_strip",
")",
":",
"for",
"x",
"i... | Strip extraneous, non-discriminative filename info from the end of a file. | [
"Strip",
"extraneous",
"non",
"-",
"discriminative",
"filename",
"info",
"from",
"the",
"end",
"of",
"a",
"file",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L101-L110 | train | 217,894 |
bcbio/bcbio-nextgen | bcbio/bam/fastq.py | fast_combine_pairs | def fast_combine_pairs(files, force_single, full_name, separators):
"""
assume files that need to be paired are within 10 entries of each other, once the list is sorted
"""
files = sort_filenames(files)
chunks = tz.sliding_window(10, files)
pairs = [combine_pairs(chunk, force_single, full_name, separators) for chunk in chunks]
pairs = [y for x in pairs for y in x]
longest = defaultdict(list)
# for each file, save the longest pair it is in
for pair in pairs:
for file in pair:
if len(longest[file]) < len(pair):
longest[file] = pair
# keep only unique pairs
longest = {tuple(sort_filenames(x)) for x in longest.values()}
# ensure filenames are R1 followed by R2
return [sort_filenames(list(x)) for x in longest] | python | def fast_combine_pairs(files, force_single, full_name, separators):
"""
assume files that need to be paired are within 10 entries of each other, once the list is sorted
"""
files = sort_filenames(files)
chunks = tz.sliding_window(10, files)
pairs = [combine_pairs(chunk, force_single, full_name, separators) for chunk in chunks]
pairs = [y for x in pairs for y in x]
longest = defaultdict(list)
# for each file, save the longest pair it is in
for pair in pairs:
for file in pair:
if len(longest[file]) < len(pair):
longest[file] = pair
# keep only unique pairs
longest = {tuple(sort_filenames(x)) for x in longest.values()}
# ensure filenames are R1 followed by R2
return [sort_filenames(list(x)) for x in longest] | [
"def",
"fast_combine_pairs",
"(",
"files",
",",
"force_single",
",",
"full_name",
",",
"separators",
")",
":",
"files",
"=",
"sort_filenames",
"(",
"files",
")",
"chunks",
"=",
"tz",
".",
"sliding_window",
"(",
"10",
",",
"files",
")",
"pairs",
"=",
"[",
... | assume files that need to be paired are within 10 entries of each other, once the list is sorted | [
"assume",
"files",
"that",
"need",
"to",
"be",
"paired",
"are",
"within",
"10",
"entries",
"of",
"each",
"other",
"once",
"the",
"list",
"is",
"sorted"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L187-L204 | train | 217,895 |
bcbio/bcbio-nextgen | bcbio/bam/fastq.py | open_fastq | def open_fastq(in_file):
""" open a fastq file, using gzip if it is gzipped
"""
if objectstore.is_remote(in_file):
return objectstore.open_file(in_file)
else:
return utils.open_gzipsafe(in_file) | python | def open_fastq(in_file):
""" open a fastq file, using gzip if it is gzipped
"""
if objectstore.is_remote(in_file):
return objectstore.open_file(in_file)
else:
return utils.open_gzipsafe(in_file) | [
"def",
"open_fastq",
"(",
"in_file",
")",
":",
"if",
"objectstore",
".",
"is_remote",
"(",
"in_file",
")",
":",
"return",
"objectstore",
".",
"open_file",
"(",
"in_file",
")",
"else",
":",
"return",
"utils",
".",
"open_gzipsafe",
"(",
"in_file",
")"
] | open a fastq file, using gzip if it is gzipped | [
"open",
"a",
"fastq",
"file",
"using",
"gzip",
"if",
"it",
"is",
"gzipped"
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/fastq.py#L309-L315 | train | 217,896 |
bcbio/bcbio-nextgen | bcbio/variation/strelka2.py | get_region_bed | def get_region_bed(region, items, out_file, want_gzip=True):
"""Retrieve BED file of regions to analyze, either single or multi-region.
"""
variant_regions = bedutils.population_variant_regions(items, merged=True)
target = shared.subset_variant_regions(variant_regions, region, out_file, items)
if not target:
raise ValueError("Need BED input for strelka2 regions: %s %s" % (region, target))
if not isinstance(target, six.string_types) or not os.path.isfile(target):
chrom, start, end = target
target = "%s-regions.bed" % utils.splitext_plus(out_file)[0]
with file_transaction(items[0], target) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("%s\t%s\t%s\n" % (chrom, start, end))
out_file = target
if want_gzip:
out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"])
return out_file | python | def get_region_bed(region, items, out_file, want_gzip=True):
"""Retrieve BED file of regions to analyze, either single or multi-region.
"""
variant_regions = bedutils.population_variant_regions(items, merged=True)
target = shared.subset_variant_regions(variant_regions, region, out_file, items)
if not target:
raise ValueError("Need BED input for strelka2 regions: %s %s" % (region, target))
if not isinstance(target, six.string_types) or not os.path.isfile(target):
chrom, start, end = target
target = "%s-regions.bed" % utils.splitext_plus(out_file)[0]
with file_transaction(items[0], target) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
out_handle.write("%s\t%s\t%s\n" % (chrom, start, end))
out_file = target
if want_gzip:
out_file = vcfutils.bgzip_and_index(out_file, items[0]["config"])
return out_file | [
"def",
"get_region_bed",
"(",
"region",
",",
"items",
",",
"out_file",
",",
"want_gzip",
"=",
"True",
")",
":",
"variant_regions",
"=",
"bedutils",
".",
"population_variant_regions",
"(",
"items",
",",
"merged",
"=",
"True",
")",
"target",
"=",
"shared",
"."... | Retrieve BED file of regions to analyze, either single or multi-region. | [
"Retrieve",
"BED",
"file",
"of",
"regions",
"to",
"analyze",
"either",
"single",
"or",
"multi",
"-",
"region",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L36-L52 | train | 217,897 |
bcbio/bcbio-nextgen | bcbio/variation/strelka2.py | coverage_interval_from_bed | def coverage_interval_from_bed(bed_file, per_chrom=True):
"""Calculate a coverage interval for the current region BED.
This helps correctly work with cases of uneven coverage across an analysis
genome. strelka2 and other model based callers have flags for targeted and non
which depend on the local context.
Checks coverage per chromosome, avoiding non-standard chromosomes, if per_chrom is set.
Otherwise does a global check over all regions. The global check performs better for
strelka2 but not for DeepVariant:
https://github.com/bcbio/bcbio_validations/tree/master/deepvariant#deepvariant-v06-release-strelka2-stratification-and-initial-gatk-cnn
"""
total_starts = {}
total_ends = {}
bed_bases = collections.defaultdict(int)
with utils.open_gzipsafe(bed_file) as in_handle:
for line in in_handle:
parts = line.split()
if len(parts) >= 3:
chrom, start, end = parts[:3]
if chromhacks.is_autosomal(chrom):
start = int(start)
end = int(end)
bed_bases[chrom] += (end - start)
total_starts[chrom] = min([start, total_starts.get(chrom, sys.maxsize)])
total_ends[chrom] = max([end, total_ends.get(chrom, 0)])
# can check per chromosome -- any one chromosome with larger, or over all regions
if per_chrom:
freqs = [float(bed_bases[c]) / float(total_ends[c] - total_starts[c]) for c in sorted(bed_bases.keys())]
elif len(bed_bases) > 0:
freqs = [sum([bed_bases[c] for c in sorted(bed_bases.keys())]) /
sum([float(total_ends[c] - total_starts[c]) for c in sorted(bed_bases.keys())])]
else:
freqs = []
# Should be importing GENOME_COV_THRESH but get circular imports
if any([f >= 0.40 for f in freqs]):
return "genome"
else:
return "targeted" | python | def coverage_interval_from_bed(bed_file, per_chrom=True):
"""Calculate a coverage interval for the current region BED.
This helps correctly work with cases of uneven coverage across an analysis
genome. strelka2 and other model based callers have flags for targeted and non
which depend on the local context.
Checks coverage per chromosome, avoiding non-standard chromosomes, if per_chrom is set.
Otherwise does a global check over all regions. The global check performs better for
strelka2 but not for DeepVariant:
https://github.com/bcbio/bcbio_validations/tree/master/deepvariant#deepvariant-v06-release-strelka2-stratification-and-initial-gatk-cnn
"""
total_starts = {}
total_ends = {}
bed_bases = collections.defaultdict(int)
with utils.open_gzipsafe(bed_file) as in_handle:
for line in in_handle:
parts = line.split()
if len(parts) >= 3:
chrom, start, end = parts[:3]
if chromhacks.is_autosomal(chrom):
start = int(start)
end = int(end)
bed_bases[chrom] += (end - start)
total_starts[chrom] = min([start, total_starts.get(chrom, sys.maxsize)])
total_ends[chrom] = max([end, total_ends.get(chrom, 0)])
# can check per chromosome -- any one chromosome with larger, or over all regions
if per_chrom:
freqs = [float(bed_bases[c]) / float(total_ends[c] - total_starts[c]) for c in sorted(bed_bases.keys())]
elif len(bed_bases) > 0:
freqs = [sum([bed_bases[c] for c in sorted(bed_bases.keys())]) /
sum([float(total_ends[c] - total_starts[c]) for c in sorted(bed_bases.keys())])]
else:
freqs = []
# Should be importing GENOME_COV_THRESH but get circular imports
if any([f >= 0.40 for f in freqs]):
return "genome"
else:
return "targeted" | [
"def",
"coverage_interval_from_bed",
"(",
"bed_file",
",",
"per_chrom",
"=",
"True",
")",
":",
"total_starts",
"=",
"{",
"}",
"total_ends",
"=",
"{",
"}",
"bed_bases",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"with",
"utils",
".",
"open_gzips... | Calculate a coverage interval for the current region BED.
This helps correctly work with cases of uneven coverage across an analysis
genome. strelka2 and other model based callers have flags for targeted and non
which depend on the local context.
Checks coverage per chromosome, avoiding non-standard chromosomes, if per_chrom is set.
Otherwise does a global check over all regions. The global check performs better for
strelka2 but not for DeepVariant:
https://github.com/bcbio/bcbio_validations/tree/master/deepvariant#deepvariant-v06-release-strelka2-stratification-and-initial-gatk-cnn | [
"Calculate",
"a",
"coverage",
"interval",
"for",
"the",
"current",
"region",
"BED",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L54-L93 | train | 217,898 |
bcbio/bcbio-nextgen | bcbio/variation/strelka2.py | _is_targeted_region | def _is_targeted_region(cur_bed, data):
"""Calculate if we should process region as a targeted or WGS.
Currently always based on total coverage interval, as that validates best and
is consistent between CWL (larger blocks) and non-CWL runs (smaller blocks).
We can check core usage and provide a consistent report when moving to CWL
exclusively.
"""
cores = dd.get_num_cores(data)
if cores > 0: # Apply to all core setups now for consistency
return dd.get_coverage_interval(data) not in ["genome"]
else:
return coverage_interval_from_bed(cur_bed, per_chrom=False) == "targeted" | python | def _is_targeted_region(cur_bed, data):
"""Calculate if we should process region as a targeted or WGS.
Currently always based on total coverage interval, as that validates best and
is consistent between CWL (larger blocks) and non-CWL runs (smaller blocks).
We can check core usage and provide a consistent report when moving to CWL
exclusively.
"""
cores = dd.get_num_cores(data)
if cores > 0: # Apply to all core setups now for consistency
return dd.get_coverage_interval(data) not in ["genome"]
else:
return coverage_interval_from_bed(cur_bed, per_chrom=False) == "targeted" | [
"def",
"_is_targeted_region",
"(",
"cur_bed",
",",
"data",
")",
":",
"cores",
"=",
"dd",
".",
"get_num_cores",
"(",
"data",
")",
"if",
"cores",
">",
"0",
":",
"# Apply to all core setups now for consistency",
"return",
"dd",
".",
"get_coverage_interval",
"(",
"d... | Calculate if we should process region as a targeted or WGS.
Currently always based on total coverage interval, as that validates best and
is consistent between CWL (larger blocks) and non-CWL runs (smaller blocks).
We can check core usage and provide a consistent report when moving to CWL
exclusively. | [
"Calculate",
"if",
"we",
"should",
"process",
"region",
"as",
"a",
"targeted",
"or",
"WGS",
"."
] | 6a9348c0054ccd5baffd22f1bb7d0422f6978b20 | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/strelka2.py#L95-L107 | train | 217,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.