repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
okpy/ok-client | client/utils/software_update.py | check_version | def check_version(server, version, filename, timeout=SHORT_TIMEOUT):
"""Check for the latest version of OK and update accordingly."""
address = VERSION_ENDPOINT.format(server=server)
print('Checking for software updates...')
log.info('Existing OK version: %s', version)
log.info('Checking latest version from %s', address)
try:
response = requests.get(address, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError) as e:
print('Network error when checking for updates.')
log.warning('Network error when checking version from %s: %s', address,
str(e), stack_info=True)
return False
response_json = response.json()
if not _validate_api_response(response_json):
print('Error while checking updates: malformed server response')
log.info('Malformed response from %s: %s', address, response.text)
return False
current_version = response_json['data']['results'][0]['current_version']
if current_version == version:
print('OK is up to date')
return True
download_link = response_json['data']['results'][0]['download_link']
log.info('Downloading version %s from %s', current_version, download_link)
try:
response = requests.get(download_link, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError) as e:
print('Error when downloading new version of OK')
log.warning('Error when downloading new version of OK: %s', str(e),
stack_info=True)
return False
log.info('Writing new version to %s', filename)
zip_binary = response.content
try:
_write_zip(filename, zip_binary)
except IOError as e:
print('Error when downloading new version of OK')
log.warning('Error writing to %s: %s', filename, str(e))
return False
else:
print('Updated to version: {}'.format(current_version))
log.info('Successfully wrote to %s', filename)
return True | python | def check_version(server, version, filename, timeout=SHORT_TIMEOUT):
"""Check for the latest version of OK and update accordingly."""
address = VERSION_ENDPOINT.format(server=server)
print('Checking for software updates...')
log.info('Existing OK version: %s', version)
log.info('Checking latest version from %s', address)
try:
response = requests.get(address, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError) as e:
print('Network error when checking for updates.')
log.warning('Network error when checking version from %s: %s', address,
str(e), stack_info=True)
return False
response_json = response.json()
if not _validate_api_response(response_json):
print('Error while checking updates: malformed server response')
log.info('Malformed response from %s: %s', address, response.text)
return False
current_version = response_json['data']['results'][0]['current_version']
if current_version == version:
print('OK is up to date')
return True
download_link = response_json['data']['results'][0]['download_link']
log.info('Downloading version %s from %s', current_version, download_link)
try:
response = requests.get(download_link, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.RequestException, requests.exceptions.BaseHTTPError) as e:
print('Error when downloading new version of OK')
log.warning('Error when downloading new version of OK: %s', str(e),
stack_info=True)
return False
log.info('Writing new version to %s', filename)
zip_binary = response.content
try:
_write_zip(filename, zip_binary)
except IOError as e:
print('Error when downloading new version of OK')
log.warning('Error writing to %s: %s', filename, str(e))
return False
else:
print('Updated to version: {}'.format(current_version))
log.info('Successfully wrote to %s', filename)
return True | [
"def",
"check_version",
"(",
"server",
",",
"version",
",",
"filename",
",",
"timeout",
"=",
"SHORT_TIMEOUT",
")",
":",
"address",
"=",
"VERSION_ENDPOINT",
".",
"format",
"(",
"server",
"=",
"server",
")",
"print",
"(",
"'Checking for software updates...'",
")",... | Check for the latest version of OK and update accordingly. | [
"Check",
"for",
"the",
"latest",
"version",
"of",
"OK",
"and",
"update",
"accordingly",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/utils/software_update.py#L11-L65 | train | 213,500 |
okpy/ok-client | client/sources/common/interpreter.py | CodeCase.run | def run(self):
"""Implements the GradedTestCase interface."""
self.console.load(self.lines, setup=self.setup, teardown=self.teardown)
return self.console.interpret() | python | def run(self):
"""Implements the GradedTestCase interface."""
self.console.load(self.lines, setup=self.setup, teardown=self.teardown)
return self.console.interpret() | [
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"console",
".",
"load",
"(",
"self",
".",
"lines",
",",
"setup",
"=",
"self",
".",
"setup",
",",
"teardown",
"=",
"self",
".",
"teardown",
")",
"return",
"self",
".",
"console",
".",
"interpret",
"(... | Implements the GradedTestCase interface. | [
"Implements",
"the",
"GradedTestCase",
"interface",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L41-L44 | train | 213,501 |
okpy/ok-client | client/sources/common/interpreter.py | CodeCase.unlock | def unlock(self, unique_id_prefix, case_id, interact):
"""Unlocks the CodeCase.
PARAMETERS:
unique_id_prefix -- string; a prefix of a unique identifier for this
Case, for purposes of analytics.
case_id -- string; an identifier for this Case, for purposes of
analytics.
interact -- function; handles user interaction during the unlocking
phase.
"""
print(self.setup.strip())
prompt_num = 0
current_prompt = []
try:
for line in self.lines:
if isinstance(line, str) and line:
print(line)
current_prompt.append(line)
elif isinstance(line, CodeAnswer):
prompt_num += 1
if not line.locked:
print('\n'.join(line.output))
continue
unique_id = self._construct_unique_id(unique_id_prefix, self.lines)
line.output = interact(unique_id,
case_id + ' > Prompt {}'.format(prompt_num),
'\n'.join(current_prompt),
line.output, line.choices)
line.locked = False
current_prompt = []
self.locked = False
finally:
self._sync_code() | python | def unlock(self, unique_id_prefix, case_id, interact):
"""Unlocks the CodeCase.
PARAMETERS:
unique_id_prefix -- string; a prefix of a unique identifier for this
Case, for purposes of analytics.
case_id -- string; an identifier for this Case, for purposes of
analytics.
interact -- function; handles user interaction during the unlocking
phase.
"""
print(self.setup.strip())
prompt_num = 0
current_prompt = []
try:
for line in self.lines:
if isinstance(line, str) and line:
print(line)
current_prompt.append(line)
elif isinstance(line, CodeAnswer):
prompt_num += 1
if not line.locked:
print('\n'.join(line.output))
continue
unique_id = self._construct_unique_id(unique_id_prefix, self.lines)
line.output = interact(unique_id,
case_id + ' > Prompt {}'.format(prompt_num),
'\n'.join(current_prompt),
line.output, line.choices)
line.locked = False
current_prompt = []
self.locked = False
finally:
self._sync_code() | [
"def",
"unlock",
"(",
"self",
",",
"unique_id_prefix",
",",
"case_id",
",",
"interact",
")",
":",
"print",
"(",
"self",
".",
"setup",
".",
"strip",
"(",
")",
")",
"prompt_num",
"=",
"0",
"current_prompt",
"=",
"[",
"]",
"try",
":",
"for",
"line",
"in... | Unlocks the CodeCase.
PARAMETERS:
unique_id_prefix -- string; a prefix of a unique identifier for this
Case, for purposes of analytics.
case_id -- string; an identifier for this Case, for purposes of
analytics.
interact -- function; handles user interaction during the unlocking
phase. | [
"Unlocks",
"the",
"CodeCase",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L55-L89 | train | 213,502 |
okpy/ok-client | client/sources/common/interpreter.py | CodeCase.split_code | def split_code(cls, code, PS1, PS2):
"""Splits the given string of code based on the provided PS1 and PS2
symbols.
PARAMETERS:
code -- str; lines of interpretable code, using PS1 and PS2 prompts
PS1 -- str; first-level prompt symbol
PS2 -- str; second-level prompt symbol
RETURN:
list; a processed sequence of lines corresponding to the input code.
"""
processed_lines = []
for line in textwrap.dedent(code).splitlines():
if not line or line.startswith(PS1) or line.startswith(PS2):
processed_lines.append(line)
continue
assert len(processed_lines) > 0, 'code improperly formatted: {}'.format(code)
if not isinstance(processed_lines[-1], CodeAnswer):
processed_lines.append(CodeAnswer())
processed_lines[-1].update(line)
return processed_lines | python | def split_code(cls, code, PS1, PS2):
"""Splits the given string of code based on the provided PS1 and PS2
symbols.
PARAMETERS:
code -- str; lines of interpretable code, using PS1 and PS2 prompts
PS1 -- str; first-level prompt symbol
PS2 -- str; second-level prompt symbol
RETURN:
list; a processed sequence of lines corresponding to the input code.
"""
processed_lines = []
for line in textwrap.dedent(code).splitlines():
if not line or line.startswith(PS1) or line.startswith(PS2):
processed_lines.append(line)
continue
assert len(processed_lines) > 0, 'code improperly formatted: {}'.format(code)
if not isinstance(processed_lines[-1], CodeAnswer):
processed_lines.append(CodeAnswer())
processed_lines[-1].update(line)
return processed_lines | [
"def",
"split_code",
"(",
"cls",
",",
"code",
",",
"PS1",
",",
"PS2",
")",
":",
"processed_lines",
"=",
"[",
"]",
"for",
"line",
"in",
"textwrap",
".",
"dedent",
"(",
"code",
")",
".",
"splitlines",
"(",
")",
":",
"if",
"not",
"line",
"or",
"line",... | Splits the given string of code based on the provided PS1 and PS2
symbols.
PARAMETERS:
code -- str; lines of interpretable code, using PS1 and PS2 prompts
PS1 -- str; first-level prompt symbol
PS2 -- str; second-level prompt symbol
RETURN:
list; a processed sequence of lines corresponding to the input code. | [
"Splits",
"the",
"given",
"string",
"of",
"code",
"based",
"on",
"the",
"provided",
"PS1",
"and",
"PS2",
"symbols",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L92-L114 | train | 213,503 |
okpy/ok-client | client/sources/common/interpreter.py | CodeCase._sync_code | def _sync_code(self):
"""Syncs the current state of self.lines with self.code, the
serializable string representing the set of code.
"""
new_code = []
for line in self.lines:
if isinstance(line, CodeAnswer):
new_code.append(line.dump())
else:
new_code.append(line)
self.code = '\n'.join(new_code) | python | def _sync_code(self):
"""Syncs the current state of self.lines with self.code, the
serializable string representing the set of code.
"""
new_code = []
for line in self.lines:
if isinstance(line, CodeAnswer):
new_code.append(line.dump())
else:
new_code.append(line)
self.code = '\n'.join(new_code) | [
"def",
"_sync_code",
"(",
"self",
")",
":",
"new_code",
"=",
"[",
"]",
"for",
"line",
"in",
"self",
".",
"lines",
":",
"if",
"isinstance",
"(",
"line",
",",
"CodeAnswer",
")",
":",
"new_code",
".",
"append",
"(",
"line",
".",
"dump",
"(",
")",
")",... | Syncs the current state of self.lines with self.code, the
serializable string representing the set of code. | [
"Syncs",
"the",
"current",
"state",
"of",
"self",
".",
"lines",
"with",
"self",
".",
"code",
"the",
"serializable",
"string",
"representing",
"the",
"set",
"of",
"code",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L116-L126 | train | 213,504 |
okpy/ok-client | client/sources/common/interpreter.py | CodeCase._construct_unique_id | def _construct_unique_id(self, id_prefix, lines):
"""Constructs a unique ID for a particular prompt in this case,
based on the id_prefix and the lines in the prompt.
"""
text = []
for line in lines:
if isinstance(line, str):
text.append(line)
elif isinstance(line, CodeAnswer):
text.append(line.dump())
return id_prefix + '\n' + '\n'.join(text) | python | def _construct_unique_id(self, id_prefix, lines):
"""Constructs a unique ID for a particular prompt in this case,
based on the id_prefix and the lines in the prompt.
"""
text = []
for line in lines:
if isinstance(line, str):
text.append(line)
elif isinstance(line, CodeAnswer):
text.append(line.dump())
return id_prefix + '\n' + '\n'.join(text) | [
"def",
"_construct_unique_id",
"(",
"self",
",",
"id_prefix",
",",
"lines",
")",
":",
"text",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"if",
"isinstance",
"(",
"line",
",",
"str",
")",
":",
"text",
".",
"append",
"(",
"line",
")",
"elif",
... | Constructs a unique ID for a particular prompt in this case,
based on the id_prefix and the lines in the prompt. | [
"Constructs",
"a",
"unique",
"ID",
"for",
"a",
"particular",
"prompt",
"in",
"this",
"case",
"based",
"on",
"the",
"id_prefix",
"and",
"the",
"lines",
"in",
"the",
"prompt",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L128-L138 | train | 213,505 |
okpy/ok-client | client/sources/common/interpreter.py | Console.interpret | def interpret(self):
"""Interprets the console on the loaded code.
RETURNS:
bool; True if the code passes, False otherwise.
"""
if not self._interpret_lines(self._setup):
return False
success = self._interpret_lines(self._code, compare_all=True)
success &= self._interpret_lines(self._teardown)
return success | python | def interpret(self):
"""Interprets the console on the loaded code.
RETURNS:
bool; True if the code passes, False otherwise.
"""
if not self._interpret_lines(self._setup):
return False
success = self._interpret_lines(self._code, compare_all=True)
success &= self._interpret_lines(self._teardown)
return success | [
"def",
"interpret",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_interpret_lines",
"(",
"self",
".",
"_setup",
")",
":",
"return",
"False",
"success",
"=",
"self",
".",
"_interpret_lines",
"(",
"self",
".",
"_code",
",",
"compare_all",
"=",
"True",... | Interprets the console on the loaded code.
RETURNS:
bool; True if the code passes, False otherwise. | [
"Interprets",
"the",
"console",
"on",
"the",
"loaded",
"code",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L172-L183 | train | 213,506 |
okpy/ok-client | client/sources/common/interpreter.py | Console._interpret_lines | def _interpret_lines(self, lines, compare_all=False):
"""Interprets the set of lines.
PARAMTERS:
lines -- list of str; lines of code
compare_all -- bool; if True, check for no output for lines that are not
followed by a CodeAnswer
RETURNS:
bool; True if successful, False otherwise.
"""
current = []
for line in lines + ['']:
if isinstance(line, str):
if current and (line.startswith(self.PS1) or not line):
# Previous prompt ends when PS1 or a blank line occurs
try:
if compare_all:
self._compare(CodeAnswer(), '\n'.join(current))
else:
self.evaluate('\n'.join(current))
except ConsoleException:
return False
current = []
if line:
print(line)
line = self._strip_prompt(line)
current.append(line)
elif isinstance(line, CodeAnswer):
assert len(current) > 0, 'Answer without a prompt'
try:
self._compare(line, '\n'.join(current))
except ConsoleException:
return False
current = []
return True | python | def _interpret_lines(self, lines, compare_all=False):
"""Interprets the set of lines.
PARAMTERS:
lines -- list of str; lines of code
compare_all -- bool; if True, check for no output for lines that are not
followed by a CodeAnswer
RETURNS:
bool; True if successful, False otherwise.
"""
current = []
for line in lines + ['']:
if isinstance(line, str):
if current and (line.startswith(self.PS1) or not line):
# Previous prompt ends when PS1 or a blank line occurs
try:
if compare_all:
self._compare(CodeAnswer(), '\n'.join(current))
else:
self.evaluate('\n'.join(current))
except ConsoleException:
return False
current = []
if line:
print(line)
line = self._strip_prompt(line)
current.append(line)
elif isinstance(line, CodeAnswer):
assert len(current) > 0, 'Answer without a prompt'
try:
self._compare(line, '\n'.join(current))
except ConsoleException:
return False
current = []
return True | [
"def",
"_interpret_lines",
"(",
"self",
",",
"lines",
",",
"compare_all",
"=",
"False",
")",
":",
"current",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
"+",
"[",
"''",
"]",
":",
"if",
"isinstance",
"(",
"line",
",",
"str",
")",
":",
"if",
"curren... | Interprets the set of lines.
PARAMTERS:
lines -- list of str; lines of code
compare_all -- bool; if True, check for no output for lines that are not
followed by a CodeAnswer
RETURNS:
bool; True if successful, False otherwise. | [
"Interprets",
"the",
"set",
"of",
"lines",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L208-L243 | train | 213,507 |
okpy/ok-client | client/sources/common/interpreter.py | CodeAnswer.dump | def dump(self):
"""Serialize a test case to a string."""
result = list(self.output_lines())
if self.locked:
result.append('# locked')
if self.choices:
for choice in self.choices:
result.append('# choice: ' + choice)
if self.explanation:
result.append('# explanation: ' + self.explanation)
return '\n'.join(result) | python | def dump(self):
"""Serialize a test case to a string."""
result = list(self.output_lines())
if self.locked:
result.append('# locked')
if self.choices:
for choice in self.choices:
result.append('# choice: ' + choice)
if self.explanation:
result.append('# explanation: ' + self.explanation)
return '\n'.join(result) | [
"def",
"dump",
"(",
"self",
")",
":",
"result",
"=",
"list",
"(",
"self",
".",
"output_lines",
"(",
")",
")",
"if",
"self",
".",
"locked",
":",
"result",
".",
"append",
"(",
"'# locked'",
")",
"if",
"self",
".",
"choices",
":",
"for",
"choice",
"in... | Serialize a test case to a string. | [
"Serialize",
"a",
"test",
"case",
"to",
"a",
"string",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L310-L320 | train | 213,508 |
okpy/ok-client | client/sources/common/interpreter.py | CodeAnswer.output_lines | def output_lines(self):
"""Return a sequence of lines, suitable for printing or comparing
answers.
"""
if self.exception:
return [self.EXCEPTION_HEADERS[0], ' ...'] + self.exception_detail
else:
return self.output | python | def output_lines(self):
"""Return a sequence of lines, suitable for printing or comparing
answers.
"""
if self.exception:
return [self.EXCEPTION_HEADERS[0], ' ...'] + self.exception_detail
else:
return self.output | [
"def",
"output_lines",
"(",
"self",
")",
":",
"if",
"self",
".",
"exception",
":",
"return",
"[",
"self",
".",
"EXCEPTION_HEADERS",
"[",
"0",
"]",
",",
"' ...'",
"]",
"+",
"self",
".",
"exception_detail",
"else",
":",
"return",
"self",
".",
"output"
] | Return a sequence of lines, suitable for printing or comparing
answers. | [
"Return",
"a",
"sequence",
"of",
"lines",
"suitable",
"for",
"printing",
"or",
"comparing",
"answers",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/interpreter.py#L322-L329 | train | 213,509 |
okpy/ok-client | client/utils/format.py | prettyjson | def prettyjson(json, indentation=' '):
"""Formats a Python-object into a string in a JSON like way, but
uses triple quotes for multiline strings.
PARAMETERS:
json -- Python object that is serializable into json.
indentation -- str; represents one level of indentation
NOTES:
All multiline strings are treated as raw strings.
RETURNS:
str; the formatted json-like string.
"""
if isinstance(json, int) or isinstance(json, float):
return str(json)
elif isinstance(json, str):
if '\n' in json:
return 'r"""\n' + dedent(json) + '\n"""'
return repr(json)
elif isinstance(json, list):
lst = [indent(prettyjson(el, indentation), indentation) for el in json]
return '[\n' + ',\n'.join(lst) + '\n]'
elif isinstance(json, dict):
pairs = []
for k, v in sorted(json.items()):
k = prettyjson(k, indentation)
v = prettyjson(v, indentation)
pairs.append(indent(k + ': ' + v, indentation))
return '{\n' + ',\n'.join(pairs) + '\n}'
else:
raise exceptions.SerializeException('Invalid json type: {}'.format(json)) | python | def prettyjson(json, indentation=' '):
"""Formats a Python-object into a string in a JSON like way, but
uses triple quotes for multiline strings.
PARAMETERS:
json -- Python object that is serializable into json.
indentation -- str; represents one level of indentation
NOTES:
All multiline strings are treated as raw strings.
RETURNS:
str; the formatted json-like string.
"""
if isinstance(json, int) or isinstance(json, float):
return str(json)
elif isinstance(json, str):
if '\n' in json:
return 'r"""\n' + dedent(json) + '\n"""'
return repr(json)
elif isinstance(json, list):
lst = [indent(prettyjson(el, indentation), indentation) for el in json]
return '[\n' + ',\n'.join(lst) + '\n]'
elif isinstance(json, dict):
pairs = []
for k, v in sorted(json.items()):
k = prettyjson(k, indentation)
v = prettyjson(v, indentation)
pairs.append(indent(k + ': ' + v, indentation))
return '{\n' + ',\n'.join(pairs) + '\n}'
else:
raise exceptions.SerializeException('Invalid json type: {}'.format(json)) | [
"def",
"prettyjson",
"(",
"json",
",",
"indentation",
"=",
"' '",
")",
":",
"if",
"isinstance",
"(",
"json",
",",
"int",
")",
"or",
"isinstance",
"(",
"json",
",",
"float",
")",
":",
"return",
"str",
"(",
"json",
")",
"elif",
"isinstance",
"(",
"jso... | Formats a Python-object into a string in a JSON like way, but
uses triple quotes for multiline strings.
PARAMETERS:
json -- Python object that is serializable into json.
indentation -- str; represents one level of indentation
NOTES:
All multiline strings are treated as raw strings.
RETURNS:
str; the formatted json-like string. | [
"Formats",
"a",
"Python",
"-",
"object",
"into",
"a",
"string",
"in",
"a",
"JSON",
"like",
"way",
"but",
"uses",
"triple",
"quotes",
"for",
"multiline",
"strings",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/utils/format.py#L129-L160 | train | 213,510 |
okpy/ok-client | client/api/notebook.py | validate_contents | def validate_contents(file_contents):
"""Ensures that all ipynb files in FILE_CONTENTS
are valid JSON files."""
for name, contents in file_contents.items():
if os.path.splitext(name)[1] != '.ipynb':
continue
if not contents:
return False
try:
json_object = json.loads(contents)
except ValueError:
return False
return True | python | def validate_contents(file_contents):
"""Ensures that all ipynb files in FILE_CONTENTS
are valid JSON files."""
for name, contents in file_contents.items():
if os.path.splitext(name)[1] != '.ipynb':
continue
if not contents:
return False
try:
json_object = json.loads(contents)
except ValueError:
return False
return True | [
"def",
"validate_contents",
"(",
"file_contents",
")",
":",
"for",
"name",
",",
"contents",
"in",
"file_contents",
".",
"items",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"name",
")",
"[",
"1",
"]",
"!=",
"'.ipynb'",
":",
"continue"... | Ensures that all ipynb files in FILE_CONTENTS
are valid JSON files. | [
"Ensures",
"that",
"all",
"ipynb",
"files",
"in",
"FILE_CONTENTS",
"are",
"valid",
"JSON",
"files",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/api/notebook.py#L144-L156 | train | 213,511 |
okpy/ok-client | client/api/notebook.py | wait_for_save | def wait_for_save(filename, timeout=5):
"""Waits for FILENAME to update, waiting up to TIMEOUT seconds.
Returns True if a save was detected, and False otherwise.
"""
modification_time = os.path.getmtime(filename)
start_time = time.time()
while time.time() < start_time + timeout:
if (os.path.getmtime(filename) > modification_time and
os.path.getsize(filename) > 0):
return True
time.sleep(0.2)
return False | python | def wait_for_save(filename, timeout=5):
"""Waits for FILENAME to update, waiting up to TIMEOUT seconds.
Returns True if a save was detected, and False otherwise.
"""
modification_time = os.path.getmtime(filename)
start_time = time.time()
while time.time() < start_time + timeout:
if (os.path.getmtime(filename) > modification_time and
os.path.getsize(filename) > 0):
return True
time.sleep(0.2)
return False | [
"def",
"wait_for_save",
"(",
"filename",
",",
"timeout",
"=",
"5",
")",
":",
"modification_time",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"filename",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"while",
"time",
".",
"time",
"(",
")",
... | Waits for FILENAME to update, waiting up to TIMEOUT seconds.
Returns True if a save was detected, and False otherwise. | [
"Waits",
"for",
"FILENAME",
"to",
"update",
"waiting",
"up",
"to",
"TIMEOUT",
"seconds",
".",
"Returns",
"True",
"if",
"a",
"save",
"was",
"detected",
"and",
"False",
"otherwise",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/api/notebook.py#L158-L169 | train | 213,512 |
okpy/ok-client | client/api/notebook.py | Notebook.score | def score(self, env=None, score_out=None):
""" Run the scoring protocol.
score_out -- str; a file name to write the point breakdown
into.
Returns: dict; maps score tag (str) -> points (float)
"""
messages = {}
self.assignment.set_args(
score=True,
score_out=score_out,
)
if env is None:
import __main__
env = __main__.__dict__
self.run('scoring', messages, env=env)
return messages['scoring'] | python | def score(self, env=None, score_out=None):
""" Run the scoring protocol.
score_out -- str; a file name to write the point breakdown
into.
Returns: dict; maps score tag (str) -> points (float)
"""
messages = {}
self.assignment.set_args(
score=True,
score_out=score_out,
)
if env is None:
import __main__
env = __main__.__dict__
self.run('scoring', messages, env=env)
return messages['scoring'] | [
"def",
"score",
"(",
"self",
",",
"env",
"=",
"None",
",",
"score_out",
"=",
"None",
")",
":",
"messages",
"=",
"{",
"}",
"self",
".",
"assignment",
".",
"set_args",
"(",
"score",
"=",
"True",
",",
"score_out",
"=",
"score_out",
",",
")",
"if",
"en... | Run the scoring protocol.
score_out -- str; a file name to write the point breakdown
into.
Returns: dict; maps score tag (str) -> points (float) | [
"Run",
"the",
"scoring",
"protocol",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/api/notebook.py#L43-L60 | train | 213,513 |
okpy/ok-client | client/api/notebook.py | Notebook.save_notebook | def save_notebook(self):
""" Saves the current notebook by
injecting JavaScript to save to .ipynb file.
"""
try:
from IPython.display import display, Javascript
except ImportError:
log.warning("Could not import IPython Display Function")
print("Make sure to save your notebook before sending it to OK!")
return
if self.mode == "jupyter":
display(Javascript('IPython.notebook.save_checkpoint();'))
display(Javascript('IPython.notebook.save_notebook();'))
elif self.mode == "jupyterlab":
display(Javascript('document.querySelector(\'[data-command="docmanager:save"]\').click();'))
print('Saving notebook...', end=' ')
ipynbs = [path for path in self.assignment.src
if os.path.splitext(path)[1] == '.ipynb']
# Wait for first .ipynb to save
if ipynbs:
if wait_for_save(ipynbs[0]):
print("Saved '{}'.".format(ipynbs[0]))
else:
log.warning("Timed out waiting for IPython save")
print("Could not automatically save \'{}\'".format(ipynbs[0]))
print("Make sure your notebook"
" is correctly named and saved before submitting to OK!".format(ipynbs[0]))
return False
else:
print("No valid file sources found")
return True | python | def save_notebook(self):
""" Saves the current notebook by
injecting JavaScript to save to .ipynb file.
"""
try:
from IPython.display import display, Javascript
except ImportError:
log.warning("Could not import IPython Display Function")
print("Make sure to save your notebook before sending it to OK!")
return
if self.mode == "jupyter":
display(Javascript('IPython.notebook.save_checkpoint();'))
display(Javascript('IPython.notebook.save_notebook();'))
elif self.mode == "jupyterlab":
display(Javascript('document.querySelector(\'[data-command="docmanager:save"]\').click();'))
print('Saving notebook...', end=' ')
ipynbs = [path for path in self.assignment.src
if os.path.splitext(path)[1] == '.ipynb']
# Wait for first .ipynb to save
if ipynbs:
if wait_for_save(ipynbs[0]):
print("Saved '{}'.".format(ipynbs[0]))
else:
log.warning("Timed out waiting for IPython save")
print("Could not automatically save \'{}\'".format(ipynbs[0]))
print("Make sure your notebook"
" is correctly named and saved before submitting to OK!".format(ipynbs[0]))
return False
else:
print("No valid file sources found")
return True | [
"def",
"save_notebook",
"(",
"self",
")",
":",
"try",
":",
"from",
"IPython",
".",
"display",
"import",
"display",
",",
"Javascript",
"except",
"ImportError",
":",
"log",
".",
"warning",
"(",
"\"Could not import IPython Display Function\"",
")",
"print",
"(",
"\... | Saves the current notebook by
injecting JavaScript to save to .ipynb file. | [
"Saves",
"the",
"current",
"notebook",
"by",
"injecting",
"JavaScript",
"to",
"save",
"to",
".",
"ipynb",
"file",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/api/notebook.py#L94-L127 | train | 213,514 |
okpy/ok-client | client/cli/lock.py | main | def main():
"""Run the LockingProtocol."""
args = parse_input()
args.lock = True
args.question = []
args.all = False
args.timeout = 0
args.verbose = False
args.interactive = False
try:
assign = assignment.load_assignment(args.config, args)
msgs = messages.Messages()
lock.protocol(args, assign).run(msgs)
except (ex.LoadingException, ex.SerializeException) as e:
log.warning('Assignment could not instantiate', exc_info=True)
print('Error: ' + str(e).strip())
exit(1)
except (KeyboardInterrupt, EOFError):
log.info('Quitting...')
else:
assign.dump_tests() | python | def main():
"""Run the LockingProtocol."""
args = parse_input()
args.lock = True
args.question = []
args.all = False
args.timeout = 0
args.verbose = False
args.interactive = False
try:
assign = assignment.load_assignment(args.config, args)
msgs = messages.Messages()
lock.protocol(args, assign).run(msgs)
except (ex.LoadingException, ex.SerializeException) as e:
log.warning('Assignment could not instantiate', exc_info=True)
print('Error: ' + str(e).strip())
exit(1)
except (KeyboardInterrupt, EOFError):
log.info('Quitting...')
else:
assign.dump_tests() | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"parse_input",
"(",
")",
"args",
".",
"lock",
"=",
"True",
"args",
".",
"question",
"=",
"[",
"]",
"args",
".",
"all",
"=",
"False",
"args",
".",
"timeout",
"=",
"0",
"args",
".",
"verbose",
"=",
"False"... | Run the LockingProtocol. | [
"Run",
"the",
"LockingProtocol",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/cli/lock.py#L23-L46 | train | 213,515 |
okpy/ok-client | client/cli/publish.py | write_tree | def write_tree(zipf, src_directory, dst_directory):
"""Write all .py files in a source directory to a destination directory
inside a zip archive.
"""
if not os.path.exists(src_directory):
abort('Tree ' + src_directory + ' does not exist.')
for root, _, files in os.walk(src_directory):
for filename in files:
if not filename.endswith(('.py', '.pem')):
continue
fullname = os.path.join(root, filename)
arcname = fullname.replace(src_directory, dst_directory)
zipf.write(fullname, arcname=arcname) | python | def write_tree(zipf, src_directory, dst_directory):
"""Write all .py files in a source directory to a destination directory
inside a zip archive.
"""
if not os.path.exists(src_directory):
abort('Tree ' + src_directory + ' does not exist.')
for root, _, files in os.walk(src_directory):
for filename in files:
if not filename.endswith(('.py', '.pem')):
continue
fullname = os.path.join(root, filename)
arcname = fullname.replace(src_directory, dst_directory)
zipf.write(fullname, arcname=arcname) | [
"def",
"write_tree",
"(",
"zipf",
",",
"src_directory",
",",
"dst_directory",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"src_directory",
")",
":",
"abort",
"(",
"'Tree '",
"+",
"src_directory",
"+",
"' does not exist.'",
")",
"for",
"ro... | Write all .py files in a source directory to a destination directory
inside a zip archive. | [
"Write",
"all",
".",
"py",
"files",
"in",
"a",
"source",
"directory",
"to",
"a",
"destination",
"directory",
"inside",
"a",
"zip",
"archive",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/cli/publish.py#L28-L40 | train | 213,516 |
okpy/ok-client | client/utils/output.py | _OutputLogger.new_log | def new_log(self):
"""Registers a new log so that calls to write will append to the log.
RETURN:
int; a unique ID to reference the log.
"""
log_id = self._num_logs
self._logs[log_id] = []
self._num_logs += 1
return log_id | python | def new_log(self):
"""Registers a new log so that calls to write will append to the log.
RETURN:
int; a unique ID to reference the log.
"""
log_id = self._num_logs
self._logs[log_id] = []
self._num_logs += 1
return log_id | [
"def",
"new_log",
"(",
"self",
")",
":",
"log_id",
"=",
"self",
".",
"_num_logs",
"self",
".",
"_logs",
"[",
"log_id",
"]",
"=",
"[",
"]",
"self",
".",
"_num_logs",
"+=",
"1",
"return",
"log_id"
] | Registers a new log so that calls to write will append to the log.
RETURN:
int; a unique ID to reference the log. | [
"Registers",
"a",
"new",
"log",
"so",
"that",
"calls",
"to",
"write",
"will",
"append",
"to",
"the",
"log",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/utils/output.py#L24-L33 | train | 213,517 |
okpy/ok-client | client/protocols/scoring.py | display_breakdown | def display_breakdown(scores, outfile=None):
"""Writes the point breakdown to `outfile` given a dictionary of scores.
`outfile` should be a string. If `outfile` is None, write to stdout.
RETURNS:
dict; 'Total' -> finalized score (float)
"""
total = 0
outfile = open(outfile, 'w') if outfile else sys.stdout
format.print_line('-')
print('Point breakdown', file=outfile)
for name, (score, max_score) in scores.items():
print(' {}: {}/{}'.format(name, score, max_score), file=outfile)
total += score
print(file=outfile)
print('Score:', file=outfile)
print(' Total: {}'.format(total), file=outfile)
return {'Total': total} | python | def display_breakdown(scores, outfile=None):
"""Writes the point breakdown to `outfile` given a dictionary of scores.
`outfile` should be a string. If `outfile` is None, write to stdout.
RETURNS:
dict; 'Total' -> finalized score (float)
"""
total = 0
outfile = open(outfile, 'w') if outfile else sys.stdout
format.print_line('-')
print('Point breakdown', file=outfile)
for name, (score, max_score) in scores.items():
print(' {}: {}/{}'.format(name, score, max_score), file=outfile)
total += score
print(file=outfile)
print('Score:', file=outfile)
print(' Total: {}'.format(total), file=outfile)
return {'Total': total} | [
"def",
"display_breakdown",
"(",
"scores",
",",
"outfile",
"=",
"None",
")",
":",
"total",
"=",
"0",
"outfile",
"=",
"open",
"(",
"outfile",
",",
"'w'",
")",
"if",
"outfile",
"else",
"sys",
".",
"stdout",
"format",
".",
"print_line",
"(",
"'-'",
")",
... | Writes the point breakdown to `outfile` given a dictionary of scores.
`outfile` should be a string. If `outfile` is None, write to stdout.
RETURNS:
dict; 'Total' -> finalized score (float) | [
"Writes",
"the",
"point",
"breakdown",
"to",
"outfile",
"given",
"a",
"dictionary",
"of",
"scores",
".",
"outfile",
"should",
"be",
"a",
"string",
".",
"If",
"outfile",
"is",
"None",
"write",
"to",
"stdout",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/scoring.py#L57-L76 | train | 213,518 |
okpy/ok-client | client/protocols/scoring.py | ScoringProtocol.run | def run(self, messages, env=None):
"""Score tests and print results. Tests are taken from
self.assignment.specified_tests. A score breakdown by question and the
total score are both printed.
ENV is used by the programatic API for Python doctests only.
"""
if not self.args.score or self.args.testing:
return
format.print_line('~')
print('Scoring tests')
print()
raw_scores = OrderedDict()
for test in self.assignment.specified_tests:
assert isinstance(test, sources_models.Test), 'ScoringProtocol received invalid test'
log.info('Scoring test {}'.format(test.name))
# A hack that allows programmatic API users to plumb a custom
# environment through to Python tests.
# Use type to ensure is an actual OkTest and not a subclass
if type(test) == ok_test_models.OkTest:
score = test.score(env=env)
else:
score = test.score()
raw_scores[test.name] = (score, test.points)
messages['scoring'] = display_breakdown(raw_scores, self.args.score_out)
print() | python | def run(self, messages, env=None):
"""Score tests and print results. Tests are taken from
self.assignment.specified_tests. A score breakdown by question and the
total score are both printed.
ENV is used by the programatic API for Python doctests only.
"""
if not self.args.score or self.args.testing:
return
format.print_line('~')
print('Scoring tests')
print()
raw_scores = OrderedDict()
for test in self.assignment.specified_tests:
assert isinstance(test, sources_models.Test), 'ScoringProtocol received invalid test'
log.info('Scoring test {}'.format(test.name))
# A hack that allows programmatic API users to plumb a custom
# environment through to Python tests.
# Use type to ensure is an actual OkTest and not a subclass
if type(test) == ok_test_models.OkTest:
score = test.score(env=env)
else:
score = test.score()
raw_scores[test.name] = (score, test.points)
messages['scoring'] = display_breakdown(raw_scores, self.args.score_out)
print() | [
"def",
"run",
"(",
"self",
",",
"messages",
",",
"env",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"args",
".",
"score",
"or",
"self",
".",
"args",
".",
"testing",
":",
"return",
"format",
".",
"print_line",
"(",
"'~'",
")",
"print",
"(",
"... | Score tests and print results. Tests are taken from
self.assignment.specified_tests. A score breakdown by question and the
total score are both printed.
ENV is used by the programatic API for Python doctests only. | [
"Score",
"tests",
"and",
"print",
"results",
".",
"Tests",
"are",
"taken",
"from",
"self",
".",
"assignment",
".",
"specified_tests",
".",
"A",
"score",
"breakdown",
"by",
"question",
"and",
"the",
"total",
"score",
"are",
"both",
"printed",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/scoring.py#L24-L55 | train | 213,519 |
okpy/ok-client | client/utils/locking.py | lock | def lock(key, text):
"""Locks the given text using the given key and returns the result"""
return hmac.new(key.encode('utf-8'), text.encode('utf-8')).hexdigest() | python | def lock(key, text):
"""Locks the given text using the given key and returns the result"""
return hmac.new(key.encode('utf-8'), text.encode('utf-8')).hexdigest() | [
"def",
"lock",
"(",
"key",
",",
"text",
")",
":",
"return",
"hmac",
".",
"new",
"(",
"key",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"text",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")"
] | Locks the given text using the given key and returns the result | [
"Locks",
"the",
"given",
"text",
"using",
"the",
"given",
"key",
"and",
"returns",
"the",
"result"
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/utils/locking.py#L5-L7 | train | 213,520 |
okpy/ok-client | client/protocols/unlock.py | UnlockProtocol.run | def run(self, messages):
"""Responsible for unlocking each test.
The unlocking process can be aborted by raising a KeyboardInterrupt or
an EOFError.
RETURNS:
dict; mapping of test name (str) -> JSON-serializable object. It is up
to each test to determine what information is significant for analytics.
"""
if not self.args.unlock:
return
format.print_line('~')
print('Unlocking tests')
print()
print('At each "{}", type what you would expect the output to be.'.format(
self.PROMPT))
print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
print()
for test in self.assignment.specified_tests:
log.info('Unlocking test {}'.format(test.name))
self.current_test = test.name
# Reset guidance explanation probability for every question
self.guidance_util.prompt_probability = guidance.DEFAULT_PROMPT_PROBABILITY
try:
test.unlock(self.interact)
except (KeyboardInterrupt, EOFError):
try:
# TODO(albert): When you use Ctrl+C in Windows, it
# throws two exceptions, so you need to catch both
# of them. Find a cleaner fix for this.
print()
print('-- Exiting unlocker --')
except (KeyboardInterrupt, EOFError):
pass
print()
break
messages['unlock'] = self.analytics | python | def run(self, messages):
"""Responsible for unlocking each test.
The unlocking process can be aborted by raising a KeyboardInterrupt or
an EOFError.
RETURNS:
dict; mapping of test name (str) -> JSON-serializable object. It is up
to each test to determine what information is significant for analytics.
"""
if not self.args.unlock:
return
format.print_line('~')
print('Unlocking tests')
print()
print('At each "{}", type what you would expect the output to be.'.format(
self.PROMPT))
print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
print()
for test in self.assignment.specified_tests:
log.info('Unlocking test {}'.format(test.name))
self.current_test = test.name
# Reset guidance explanation probability for every question
self.guidance_util.prompt_probability = guidance.DEFAULT_PROMPT_PROBABILITY
try:
test.unlock(self.interact)
except (KeyboardInterrupt, EOFError):
try:
# TODO(albert): When you use Ctrl+C in Windows, it
# throws two exceptions, so you need to catch both
# of them. Find a cleaner fix for this.
print()
print('-- Exiting unlocker --')
except (KeyboardInterrupt, EOFError):
pass
print()
break
messages['unlock'] = self.analytics | [
"def",
"run",
"(",
"self",
",",
"messages",
")",
":",
"if",
"not",
"self",
".",
"args",
".",
"unlock",
":",
"return",
"format",
".",
"print_line",
"(",
"'~'",
")",
"print",
"(",
"'Unlocking tests'",
")",
"print",
"(",
")",
"print",
"(",
"'At each \"{}\... | Responsible for unlocking each test.
The unlocking process can be aborted by raising a KeyboardInterrupt or
an EOFError.
RETURNS:
dict; mapping of test name (str) -> JSON-serializable object. It is up
to each test to determine what information is significant for analytics. | [
"Responsible",
"for",
"unlocking",
"each",
"test",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/unlock.py#L46-L88 | train | 213,521 |
okpy/ok-client | client/protocols/unlock.py | UnlockProtocol.interact | def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True):
"""Reads student input for unlocking tests until the student
answers correctly.
PARAMETERS:
unique_id -- str; the ID that is recorded with this unlocking
attempt.
case_id -- str; the ID that is recorded with this unlocking
attempt.
question_prompt -- str; the question prompt
answer -- list; a list of locked lines in a test case answer.
choices -- list or None; a list of choices. If None or an
empty list, signifies the question is not multiple
choice.
randomize -- bool; if True, randomizes the choices on first
invocation.
DESCRIPTION:
Continually prompt the student for an answer to an unlocking
question until one of the folliwng happens:
1. The student supplies the correct answer, in which case
the supplied answer is returned
2. The student aborts abnormally (either by typing 'exit()'
or using Ctrl-C/D. In this case, return None
Correctness is determined by the verify method.
RETURNS:
list; the correct solution (that the student supplied). Each element
in the list is a line of the correct output.
"""
if randomize and choices:
choices = random.sample(choices, len(choices))
correct = False
while not correct:
if choices:
assert len(answer) == 1, 'Choices must have 1 line of output'
choice_map = self._display_choices(choices)
question_timestamp = datetime.now()
input_lines = []
for line_number, line in enumerate(answer):
if len(answer) == 1:
prompt = self.PROMPT
else:
prompt = '(line {}){}'.format(line_number + 1, self.PROMPT)
student_input = format.normalize(self._input(prompt))
self._add_history(student_input)
if student_input in self.EXIT_INPUTS:
raise EOFError
if choices and student_input in choice_map:
student_input = choice_map[student_input]
correct_answer = self._verify_student_input(student_input, line)
if correct_answer:
input_lines.append(correct_answer)
else:
input_lines.append(student_input)
break
else:
correct = True
tg_id = -1
misU_count_dict = {}
rationale = "Unknown - Default Value"
if not correct:
guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines,
self.hash_key)
misU_count_dict, tg_id, printed_msg, rationale = guidance_data
else:
rationale = self.guidance_util.prompt_with_prob()
print("-- OK! --")
printed_msg = ["-- OK! --"]
self.analytics.append({
'id': unique_id,
'case_id': case_id,
'question timestamp': self.unix_time(question_timestamp),
'answer timestamp': self.unix_time(datetime.now()),
'prompt': question_prompt,
'answer': input_lines,
'correct': correct,
'treatment group id': tg_id,
'rationale': rationale,
'misU count': misU_count_dict,
'printed msg': printed_msg
})
print()
return input_lines | python | def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True):
"""Reads student input for unlocking tests until the student
answers correctly.
PARAMETERS:
unique_id -- str; the ID that is recorded with this unlocking
attempt.
case_id -- str; the ID that is recorded with this unlocking
attempt.
question_prompt -- str; the question prompt
answer -- list; a list of locked lines in a test case answer.
choices -- list or None; a list of choices. If None or an
empty list, signifies the question is not multiple
choice.
randomize -- bool; if True, randomizes the choices on first
invocation.
DESCRIPTION:
Continually prompt the student for an answer to an unlocking
question until one of the folliwng happens:
1. The student supplies the correct answer, in which case
the supplied answer is returned
2. The student aborts abnormally (either by typing 'exit()'
or using Ctrl-C/D. In this case, return None
Correctness is determined by the verify method.
RETURNS:
list; the correct solution (that the student supplied). Each element
in the list is a line of the correct output.
"""
if randomize and choices:
choices = random.sample(choices, len(choices))
correct = False
while not correct:
if choices:
assert len(answer) == 1, 'Choices must have 1 line of output'
choice_map = self._display_choices(choices)
question_timestamp = datetime.now()
input_lines = []
for line_number, line in enumerate(answer):
if len(answer) == 1:
prompt = self.PROMPT
else:
prompt = '(line {}){}'.format(line_number + 1, self.PROMPT)
student_input = format.normalize(self._input(prompt))
self._add_history(student_input)
if student_input in self.EXIT_INPUTS:
raise EOFError
if choices and student_input in choice_map:
student_input = choice_map[student_input]
correct_answer = self._verify_student_input(student_input, line)
if correct_answer:
input_lines.append(correct_answer)
else:
input_lines.append(student_input)
break
else:
correct = True
tg_id = -1
misU_count_dict = {}
rationale = "Unknown - Default Value"
if not correct:
guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines,
self.hash_key)
misU_count_dict, tg_id, printed_msg, rationale = guidance_data
else:
rationale = self.guidance_util.prompt_with_prob()
print("-- OK! --")
printed_msg = ["-- OK! --"]
self.analytics.append({
'id': unique_id,
'case_id': case_id,
'question timestamp': self.unix_time(question_timestamp),
'answer timestamp': self.unix_time(datetime.now()),
'prompt': question_prompt,
'answer': input_lines,
'correct': correct,
'treatment group id': tg_id,
'rationale': rationale,
'misU count': misU_count_dict,
'printed msg': printed_msg
})
print()
return input_lines | [
"def",
"interact",
"(",
"self",
",",
"unique_id",
",",
"case_id",
",",
"question_prompt",
",",
"answer",
",",
"choices",
"=",
"None",
",",
"randomize",
"=",
"True",
")",
":",
"if",
"randomize",
"and",
"choices",
":",
"choices",
"=",
"random",
".",
"sampl... | Reads student input for unlocking tests until the student
answers correctly.
PARAMETERS:
unique_id -- str; the ID that is recorded with this unlocking
attempt.
case_id -- str; the ID that is recorded with this unlocking
attempt.
question_prompt -- str; the question prompt
answer -- list; a list of locked lines in a test case answer.
choices -- list or None; a list of choices. If None or an
empty list, signifies the question is not multiple
choice.
randomize -- bool; if True, randomizes the choices on first
invocation.
DESCRIPTION:
Continually prompt the student for an answer to an unlocking
question until one of the folliwng happens:
1. The student supplies the correct answer, in which case
the supplied answer is returned
2. The student aborts abnormally (either by typing 'exit()'
or using Ctrl-C/D. In this case, return None
Correctness is determined by the verify method.
RETURNS:
list; the correct solution (that the student supplied). Each element
in the list is a line of the correct output. | [
"Reads",
"student",
"input",
"for",
"unlocking",
"tests",
"until",
"the",
"student",
"answers",
"correctly",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/unlock.py#L90-L184 | train | 213,522 |
okpy/ok-client | client/protocols/unlock.py | UnlockProtocol._verify_student_input | def _verify_student_input(self, student_input, locked):
"""If the student's answer is correct, returns the normalized answer.
Otherwise, returns None.
"""
guesses = [student_input]
try:
guesses.append(repr(ast.literal_eval(student_input)))
except Exception:
pass
if student_input.title() in self.SPECIAL_INPUTS:
guesses.append(student_input.title())
for guess in guesses:
if self._verify(guess, locked):
return guess | python | def _verify_student_input(self, student_input, locked):
"""If the student's answer is correct, returns the normalized answer.
Otherwise, returns None.
"""
guesses = [student_input]
try:
guesses.append(repr(ast.literal_eval(student_input)))
except Exception:
pass
if student_input.title() in self.SPECIAL_INPUTS:
guesses.append(student_input.title())
for guess in guesses:
if self._verify(guess, locked):
return guess | [
"def",
"_verify_student_input",
"(",
"self",
",",
"student_input",
",",
"locked",
")",
":",
"guesses",
"=",
"[",
"student_input",
"]",
"try",
":",
"guesses",
".",
"append",
"(",
"repr",
"(",
"ast",
".",
"literal_eval",
"(",
"student_input",
")",
")",
")",
... | If the student's answer is correct, returns the normalized answer.
Otherwise, returns None. | [
"If",
"the",
"student",
"s",
"answer",
"is",
"correct",
"returns",
"the",
"normalized",
"answer",
".",
"Otherwise",
"returns",
"None",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/unlock.py#L190-L203 | train | 213,523 |
okpy/ok-client | client/protocols/unlock.py | UnlockProtocol._display_choices | def _display_choices(self, choices):
"""Prints a mapping of numbers to choices and returns the
mapping as a dictionary.
"""
print("Choose the number of the correct choice:")
choice_map = {}
for i, choice in enumerate(choices):
i = str(i)
print('{}) {}'.format(i, format.indent(choice,
' ' * (len(i) + 2)).strip()))
choice = format.normalize(choice)
choice_map[i] = choice
return choice_map | python | def _display_choices(self, choices):
"""Prints a mapping of numbers to choices and returns the
mapping as a dictionary.
"""
print("Choose the number of the correct choice:")
choice_map = {}
for i, choice in enumerate(choices):
i = str(i)
print('{}) {}'.format(i, format.indent(choice,
' ' * (len(i) + 2)).strip()))
choice = format.normalize(choice)
choice_map[i] = choice
return choice_map | [
"def",
"_display_choices",
"(",
"self",
",",
"choices",
")",
":",
"print",
"(",
"\"Choose the number of the correct choice:\"",
")",
"choice_map",
"=",
"{",
"}",
"for",
"i",
",",
"choice",
"in",
"enumerate",
"(",
"choices",
")",
":",
"i",
"=",
"str",
"(",
... | Prints a mapping of numbers to choices and returns the
mapping as a dictionary. | [
"Prints",
"a",
"mapping",
"of",
"numbers",
"to",
"choices",
"and",
"returns",
"the",
"mapping",
"as",
"a",
"dictionary",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/unlock.py#L212-L224 | train | 213,524 |
okpy/ok-client | client/utils/timer.py | timed | def timed(timeout, fn, args=(), kargs={}):
"""For a nonzero timeout, evaluates a call expression in a separate thread.
If the timeout is 0, the expression is evaluated in the main thread.
PARAMETERS:
fn -- function; Python function to be evaluated
args -- tuple; positional arguments for fn
kargs -- dict; keyword arguments for fn
timeout -- int; number of seconds before timer interrupt
RETURN:
Result of calling fn(*args, **kargs).
RAISES:
Timeout -- if thread takes longer than timeout to execute
Error -- if calling fn raises an error, raise it
"""
if timeout == 0:
return fn(*args, **kargs)
submission = __ReturningThread(fn, args, kargs)
submission.start()
submission.join(timeout)
if submission.is_alive():
raise exceptions.Timeout(timeout)
if submission.error is not None:
raise submission.error
return submission.result | python | def timed(timeout, fn, args=(), kargs={}):
"""For a nonzero timeout, evaluates a call expression in a separate thread.
If the timeout is 0, the expression is evaluated in the main thread.
PARAMETERS:
fn -- function; Python function to be evaluated
args -- tuple; positional arguments for fn
kargs -- dict; keyword arguments for fn
timeout -- int; number of seconds before timer interrupt
RETURN:
Result of calling fn(*args, **kargs).
RAISES:
Timeout -- if thread takes longer than timeout to execute
Error -- if calling fn raises an error, raise it
"""
if timeout == 0:
return fn(*args, **kargs)
submission = __ReturningThread(fn, args, kargs)
submission.start()
submission.join(timeout)
if submission.is_alive():
raise exceptions.Timeout(timeout)
if submission.error is not None:
raise submission.error
return submission.result | [
"def",
"timed",
"(",
"timeout",
",",
"fn",
",",
"args",
"=",
"(",
")",
",",
"kargs",
"=",
"{",
"}",
")",
":",
"if",
"timeout",
"==",
"0",
":",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
"submission",
"=",
"__ReturningThread",
... | For a nonzero timeout, evaluates a call expression in a separate thread.
If the timeout is 0, the expression is evaluated in the main thread.
PARAMETERS:
fn -- function; Python function to be evaluated
args -- tuple; positional arguments for fn
kargs -- dict; keyword arguments for fn
timeout -- int; number of seconds before timer interrupt
RETURN:
Result of calling fn(*args, **kargs).
RAISES:
Timeout -- if thread takes longer than timeout to execute
Error -- if calling fn raises an error, raise it | [
"For",
"a",
"nonzero",
"timeout",
"evaluates",
"a",
"call",
"expression",
"in",
"a",
"separate",
"thread",
".",
"If",
"the",
"timeout",
"is",
"0",
"the",
"expression",
"is",
"evaluated",
"in",
"the",
"main",
"thread",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/utils/timer.py#L7-L34 | train | 213,525 |
okpy/ok-client | client/api/assignment.py | Assignment.grade | def grade(self, question, env=None, skip_locked_cases=False):
"""Runs tests for a particular question. The setup and teardown will
always be executed.
question -- str; a question name (as would be entered at the command
line
env -- dict; an environment in which to execute the tests. If
None, uses the environment of __main__. The original
dictionary is never modified; each test is given a
duplicate of env.
skip_locked_cases -- bool; if False, locked cases will be tested
Returns: dict; maps question names (str) -> results (dict). The
results dictionary contains the following fields:
- "passed": int (number of test cases passed)
- "failed": int (number of test cases failed)
- "locked": int (number of test cases locked)
"""
if env is None:
import __main__
env = __main__.__dict__
messages = {}
tests = self._resolve_specified_tests([question], all_tests=False)
for test in tests:
try:
for suite in test.suites:
suite.skip_locked_cases = skip_locked_cases
suite.console.skip_locked_cases = skip_locked_cases
suite.console.hash_key = self.name
except AttributeError:
pass
test_name = tests[0].name
grade(tests, messages, env)
return messages['grading'][test_name] | python | def grade(self, question, env=None, skip_locked_cases=False):
"""Runs tests for a particular question. The setup and teardown will
always be executed.
question -- str; a question name (as would be entered at the command
line
env -- dict; an environment in which to execute the tests. If
None, uses the environment of __main__. The original
dictionary is never modified; each test is given a
duplicate of env.
skip_locked_cases -- bool; if False, locked cases will be tested
Returns: dict; maps question names (str) -> results (dict). The
results dictionary contains the following fields:
- "passed": int (number of test cases passed)
- "failed": int (number of test cases failed)
- "locked": int (number of test cases locked)
"""
if env is None:
import __main__
env = __main__.__dict__
messages = {}
tests = self._resolve_specified_tests([question], all_tests=False)
for test in tests:
try:
for suite in test.suites:
suite.skip_locked_cases = skip_locked_cases
suite.console.skip_locked_cases = skip_locked_cases
suite.console.hash_key = self.name
except AttributeError:
pass
test_name = tests[0].name
grade(tests, messages, env)
return messages['grading'][test_name] | [
"def",
"grade",
"(",
"self",
",",
"question",
",",
"env",
"=",
"None",
",",
"skip_locked_cases",
"=",
"False",
")",
":",
"if",
"env",
"is",
"None",
":",
"import",
"__main__",
"env",
"=",
"__main__",
".",
"__dict__",
"messages",
"=",
"{",
"}",
"tests",
... | Runs tests for a particular question. The setup and teardown will
always be executed.
question -- str; a question name (as would be entered at the command
line
env -- dict; an environment in which to execute the tests. If
None, uses the environment of __main__. The original
dictionary is never modified; each test is given a
duplicate of env.
skip_locked_cases -- bool; if False, locked cases will be tested
Returns: dict; maps question names (str) -> results (dict). The
results dictionary contains the following fields:
- "passed": int (number of test cases passed)
- "failed": int (number of test cases failed)
- "locked": int (number of test cases locked) | [
"Runs",
"tests",
"for",
"a",
"particular",
"question",
".",
"The",
"setup",
"and",
"teardown",
"will",
"always",
"be",
"executed",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/api/assignment.py#L70-L103 | train | 213,526 |
okpy/ok-client | client/protocols/analytics.py | AnalyticsProtocol.run | def run(self, messages):
"""Returns some analytics about this autograder run."""
statistics = {}
statistics['time'] = str(datetime.now())
statistics['time-utc'] = str(datetime.utcnow())
statistics['unlock'] = self.args.unlock
if self.args.question:
statistics['question'] = [t.name for t in self.assignment.specified_tests]
statistics['requested-questions'] = self.args.question
if self.args.suite:
statistics['requested-suite'] = self.args.suite
if self.args.case:
statistics['requested-case'] = self.args.case
messages['analytics'] = statistics
self.log_run(messages) | python | def run(self, messages):
"""Returns some analytics about this autograder run."""
statistics = {}
statistics['time'] = str(datetime.now())
statistics['time-utc'] = str(datetime.utcnow())
statistics['unlock'] = self.args.unlock
if self.args.question:
statistics['question'] = [t.name for t in self.assignment.specified_tests]
statistics['requested-questions'] = self.args.question
if self.args.suite:
statistics['requested-suite'] = self.args.suite
if self.args.case:
statistics['requested-case'] = self.args.case
messages['analytics'] = statistics
self.log_run(messages) | [
"def",
"run",
"(",
"self",
",",
"messages",
")",
":",
"statistics",
"=",
"{",
"}",
"statistics",
"[",
"'time'",
"]",
"=",
"str",
"(",
"datetime",
".",
"now",
"(",
")",
")",
"statistics",
"[",
"'time-utc'",
"]",
"=",
"str",
"(",
"datetime",
".",
"ut... | Returns some analytics about this autograder run. | [
"Returns",
"some",
"analytics",
"about",
"this",
"autograder",
"run",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/analytics.py#L35-L52 | train | 213,527 |
okpy/ok-client | client/protocols/analytics.py | AnalyticsProtocol.log_run | def log_run(self, messages):
"""Record this run of the autograder to a local file.
If the student does not specify what question(s) the student is
running ok against, assume that the student is aiming to work on
the question with the first failed test. If a student finishes
questions 1 - N-1, the first test to fail will be N.
"""
# Load the contents of the local analytics file
history = self.read_history()
history['all_attempts'] += 1
# List of question names that the student asked to have graded
questions = messages['analytics'].get('question', [])
# The output of the grading protocol
grading = messages.get('grading')
# Attempt to figure out what the student is currently implementing
if not questions and grading:
# If questions are unspecified by the user, use the first failed test
failed = first_failed_test(self.assignment.specified_tests, grading)
logging.info('First failed test: {}'.format(failed))
if failed:
questions = [failed]
# Update question correctness status from previous attempts
for saved_q, details in history['questions'].items():
finished = details['solved']
if not finished and saved_q in grading:
scoring = grading[saved_q]
details['solved'] = is_correct(scoring)
# The question(s) that the student is testing right now.
history['question'] = questions
# Update attempt and correctness counts for the graded questions
for question in questions:
detail = history['questions']
if grading and question in grading:
scoring = is_correct(grading[question])
else:
scoring = False
# Update attempt counts or initialize counts
if question in history['questions']:
q_info = detail[question]
if grading and question in grading:
if q_info['solved'] != True:
q_info['solved'] = scoring
else:
continue # Already solved. Do not change total
q_info['attempts'] += 1
else:
detail[question] = {
'attempts': 1,
'solved': scoring
}
logging.info('Attempt %d for Question %s : %r',
history['questions'], question, scoring)
with open(self.ANALYTICS_FILE, 'wb') as f:
log.info('Saving history to %s', self.ANALYTICS_FILE)
pickle.dump(history, f)
os.fsync(f)
messages['analytics']['history'] = history | python | def log_run(self, messages):
"""Record this run of the autograder to a local file.
If the student does not specify what question(s) the student is
running ok against, assume that the student is aiming to work on
the question with the first failed test. If a student finishes
questions 1 - N-1, the first test to fail will be N.
"""
# Load the contents of the local analytics file
history = self.read_history()
history['all_attempts'] += 1
# List of question names that the student asked to have graded
questions = messages['analytics'].get('question', [])
# The output of the grading protocol
grading = messages.get('grading')
# Attempt to figure out what the student is currently implementing
if not questions and grading:
# If questions are unspecified by the user, use the first failed test
failed = first_failed_test(self.assignment.specified_tests, grading)
logging.info('First failed test: {}'.format(failed))
if failed:
questions = [failed]
# Update question correctness status from previous attempts
for saved_q, details in history['questions'].items():
finished = details['solved']
if not finished and saved_q in grading:
scoring = grading[saved_q]
details['solved'] = is_correct(scoring)
# The question(s) that the student is testing right now.
history['question'] = questions
# Update attempt and correctness counts for the graded questions
for question in questions:
detail = history['questions']
if grading and question in grading:
scoring = is_correct(grading[question])
else:
scoring = False
# Update attempt counts or initialize counts
if question in history['questions']:
q_info = detail[question]
if grading and question in grading:
if q_info['solved'] != True:
q_info['solved'] = scoring
else:
continue # Already solved. Do not change total
q_info['attempts'] += 1
else:
detail[question] = {
'attempts': 1,
'solved': scoring
}
logging.info('Attempt %d for Question %s : %r',
history['questions'], question, scoring)
with open(self.ANALYTICS_FILE, 'wb') as f:
log.info('Saving history to %s', self.ANALYTICS_FILE)
pickle.dump(history, f)
os.fsync(f)
messages['analytics']['history'] = history | [
"def",
"log_run",
"(",
"self",
",",
"messages",
")",
":",
"# Load the contents of the local analytics file",
"history",
"=",
"self",
".",
"read_history",
"(",
")",
"history",
"[",
"'all_attempts'",
"]",
"+=",
"1",
"# List of question names that the student asked to have g... | Record this run of the autograder to a local file.
If the student does not specify what question(s) the student is
running ok against, assume that the student is aiming to work on
the question with the first failed test. If a student finishes
questions 1 - N-1, the first test to fail will be N. | [
"Record",
"this",
"run",
"of",
"the",
"autograder",
"to",
"a",
"local",
"file",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/analytics.py#L78-L143 | train | 213,528 |
okpy/ok-client | client/utils/network.py | check_ssl | def check_ssl():
"""Attempts to import SSL or raises an exception."""
try:
import ssl
except:
log.warning('Error importing SSL module', stack_info=True)
print(SSL_ERROR_MESSAGE)
sys.exit(1)
else:
log.info('SSL module is available')
return ssl | python | def check_ssl():
"""Attempts to import SSL or raises an exception."""
try:
import ssl
except:
log.warning('Error importing SSL module', stack_info=True)
print(SSL_ERROR_MESSAGE)
sys.exit(1)
else:
log.info('SSL module is available')
return ssl | [
"def",
"check_ssl",
"(",
")",
":",
"try",
":",
"import",
"ssl",
"except",
":",
"log",
".",
"warning",
"(",
"'Error importing SSL module'",
",",
"stack_info",
"=",
"True",
")",
"print",
"(",
"SSL_ERROR_MESSAGE",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"el... | Attempts to import SSL or raises an exception. | [
"Attempts",
"to",
"import",
"SSL",
"or",
"raises",
"an",
"exception",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/utils/network.py#L14-L24 | train | 213,529 |
okpy/ok-client | client/protocols/grading.py | GradingProtocol.run | def run(self, messages, env=None):
"""Run gradeable tests and print results and return analytics.
RETURNS:
dict; a mapping of test name -> JSON-serializable object. It is up to
each test to determine what kind of data it wants to return as
significant for analytics. However, all tests must include the number
passed, the number of locked tests and the number of failed tests.
"""
if self.args.score or self.args.unlock or self.args.testing:
return
tests = self.assignment.specified_tests
for test in tests:
if self.args.suite and hasattr(test, 'suites'):
test.run_only = int(self.args.suite)
try:
suite = test.suites[int(self.args.suite) - 1]
except IndexError as e:
sys.exit(('python3 ok: error: '
'Suite number must be valid.({})'.format(len(test.suites))))
if self.args.case:
suite.run_only = [int(c) for c in self.args.case]
grade(tests, messages, env, verbose=self.args.verbose) | python | def run(self, messages, env=None):
"""Run gradeable tests and print results and return analytics.
RETURNS:
dict; a mapping of test name -> JSON-serializable object. It is up to
each test to determine what kind of data it wants to return as
significant for analytics. However, all tests must include the number
passed, the number of locked tests and the number of failed tests.
"""
if self.args.score or self.args.unlock or self.args.testing:
return
tests = self.assignment.specified_tests
for test in tests:
if self.args.suite and hasattr(test, 'suites'):
test.run_only = int(self.args.suite)
try:
suite = test.suites[int(self.args.suite) - 1]
except IndexError as e:
sys.exit(('python3 ok: error: '
'Suite number must be valid.({})'.format(len(test.suites))))
if self.args.case:
suite.run_only = [int(c) for c in self.args.case]
grade(tests, messages, env, verbose=self.args.verbose) | [
"def",
"run",
"(",
"self",
",",
"messages",
",",
"env",
"=",
"None",
")",
":",
"if",
"self",
".",
"args",
".",
"score",
"or",
"self",
".",
"args",
".",
"unlock",
"or",
"self",
".",
"args",
".",
"testing",
":",
"return",
"tests",
"=",
"self",
".",... | Run gradeable tests and print results and return analytics.
RETURNS:
dict; a mapping of test name -> JSON-serializable object. It is up to
each test to determine what kind of data it wants to return as
significant for analytics. However, all tests must include the number
passed, the number of locked tests and the number of failed tests. | [
"Run",
"gradeable",
"tests",
"and",
"print",
"results",
"and",
"return",
"analytics",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/protocols/grading.py#L24-L46 | train | 213,530 |
okpy/ok-client | client/sources/common/core.py | Field.coerce | def coerce(self, value):
"""Subclasses should override this method for type coercion.
Default version will simply return the argument. If the argument
is not valid, a SerializeException is raised.
For primitives like booleans, ints, floats, and strings, use
this default version to avoid unintended type conversions."""
if not self.is_valid(value):
raise ex.SerializeException('{} is not a valid value for '
'type {}'.format(value, self.__class__.__name__))
return value | python | def coerce(self, value):
"""Subclasses should override this method for type coercion.
Default version will simply return the argument. If the argument
is not valid, a SerializeException is raised.
For primitives like booleans, ints, floats, and strings, use
this default version to avoid unintended type conversions."""
if not self.is_valid(value):
raise ex.SerializeException('{} is not a valid value for '
'type {}'.format(value, self.__class__.__name__))
return value | [
"def",
"coerce",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
"(",
"value",
")",
":",
"raise",
"ex",
".",
"SerializeException",
"(",
"'{} is not a valid value for '",
"'type {}'",
".",
"format",
"(",
"value",
",",
"self",
".",... | Subclasses should override this method for type coercion.
Default version will simply return the argument. If the argument
is not valid, a SerializeException is raised.
For primitives like booleans, ints, floats, and strings, use
this default version to avoid unintended type conversions. | [
"Subclasses",
"should",
"override",
"this",
"method",
"for",
"type",
"coercion",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/core.py#L37-L48 | train | 213,531 |
okpy/ok-client | client/sources/common/core.py | Field.to_json | def to_json(self, value):
"""Subclasses should override this method for JSON encoding."""
if not self.is_valid(value):
raise ex.SerializeException('Invalid value: {}'.format(value))
return value | python | def to_json(self, value):
"""Subclasses should override this method for JSON encoding."""
if not self.is_valid(value):
raise ex.SerializeException('Invalid value: {}'.format(value))
return value | [
"def",
"to_json",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
"(",
"value",
")",
":",
"raise",
"ex",
".",
"SerializeException",
"(",
"'Invalid value: {}'",
".",
"format",
"(",
"value",
")",
")",
"return",
"value"
] | Subclasses should override this method for JSON encoding. | [
"Subclasses",
"should",
"override",
"this",
"method",
"for",
"JSON",
"encoding",
"."
] | 517f57dd76284af40ba9766e42d9222b644afd9c | https://github.com/okpy/ok-client/blob/517f57dd76284af40ba9766e42d9222b644afd9c/client/sources/common/core.py#L50-L54 | train | 213,532 |
CRS-support/ftw | ftw/http.py | HttpResponse.parse_content_encoding | def parse_content_encoding(self, response_headers, response_data):
"""
Parses a response that contains Content-Encoding to retrieve
response_data
"""
if response_headers['content-encoding'] == 'gzip':
buf = StringIO.StringIO(response_data)
zipbuf = gzip.GzipFile(fileobj=buf)
response_data = zipbuf.read()
elif response_headers['content-encoding'] == 'deflate':
data = StringIO.StringIO(zlib.decompress(response_data))
response_data = data.read()
else:
raise errors.TestError(
'Received unknown Content-Encoding',
{
'content-encoding':
str(response_headers['content-encoding']),
'function': 'http.HttpResponse.parse_content_encoding'
})
return response_data | python | def parse_content_encoding(self, response_headers, response_data):
"""
Parses a response that contains Content-Encoding to retrieve
response_data
"""
if response_headers['content-encoding'] == 'gzip':
buf = StringIO.StringIO(response_data)
zipbuf = gzip.GzipFile(fileobj=buf)
response_data = zipbuf.read()
elif response_headers['content-encoding'] == 'deflate':
data = StringIO.StringIO(zlib.decompress(response_data))
response_data = data.read()
else:
raise errors.TestError(
'Received unknown Content-Encoding',
{
'content-encoding':
str(response_headers['content-encoding']),
'function': 'http.HttpResponse.parse_content_encoding'
})
return response_data | [
"def",
"parse_content_encoding",
"(",
"self",
",",
"response_headers",
",",
"response_data",
")",
":",
"if",
"response_headers",
"[",
"'content-encoding'",
"]",
"==",
"'gzip'",
":",
"buf",
"=",
"StringIO",
".",
"StringIO",
"(",
"response_data",
")",
"zipbuf",
"=... | Parses a response that contains Content-Encoding to retrieve
response_data | [
"Parses",
"a",
"response",
"that",
"contains",
"Content",
"-",
"Encoding",
"to",
"retrieve",
"response_data"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/http.py#L41-L61 | train | 213,533 |
CRS-support/ftw | ftw/http.py | HttpResponse.process_response | def process_response(self):
"""
Parses an HTTP response after an HTTP request is sent
"""
split_response = self.response.split(self.CRLF)
response_line = split_response[0]
response_headers = {}
response_data = None
data_line = None
for line_num in range(1, len(split_response[1:])):
# CRLF represents the start of data
if split_response[line_num] == '':
data_line = line_num + 1
break
else:
# Headers are all split by ':'
header = split_response[line_num].split(':', 1)
if len(header) != 2:
raise errors.TestError(
'Did not receive a response with valid headers',
{
'header_rcvd': str(header),
'function': 'http.HttpResponse.process_response'
})
response_headers[header[0].lower()] = header[1].lstrip()
if 'set-cookie' in response_headers.keys():
try:
cookie = Cookie.SimpleCookie()
cookie.load(response_headers['set-cookie'])
except Cookie.CookieError as err:
raise errors.TestError(
'Error processing the cookie content into a SimpleCookie',
{
'msg': str(err),
'set_cookie': str(response_headers['set-cookie']),
'function': 'http.HttpResponse.process_response'
})
# if the check_for_cookie is invalid then we don't save it
if self.check_for_cookie(cookie) is False:
raise errors.TestError(
'An invalid cookie was specified',
{
'set_cookie': str(response_headers['set-cookie']),
'function': 'http.HttpResponse.process_response'
})
else:
self.cookiejar.append((cookie, self.dest_addr))
if data_line is not None and data_line < len(split_response):
response_data = self.CRLF.join(split_response[data_line:])
# if the output headers say there is encoding
if 'content-encoding' in response_headers.keys():
response_data = self.parse_content_encoding(
response_headers, response_data)
if len(response_line.split(' ', 2)) != 3:
raise errors.TestError(
'The HTTP response line returned the wrong args',
{
'response_line': str(response_line),
'function': 'http.HttpResponse.process_response'
})
try:
self.status = int(response_line.split(' ', 2)[1])
except ValueError:
raise errors.TestError(
'The status num of the response line isn\'t convertable',
{
'msg': 'This may be an HTTP 1.0 \'Simple Req\\Res\', it \
doesn\'t have HTTP headers and FTW will not parse these',
'response_line': str(response_line),
'function': 'http.HttpResponse.process_response'
})
self.status_msg = response_line.split(' ', 2)[2]
self.version = response_line.split(' ', 2)[0]
self.response_line = response_line
self.headers = response_headers
self.data = response_data | python | def process_response(self):
"""
Parses an HTTP response after an HTTP request is sent
"""
split_response = self.response.split(self.CRLF)
response_line = split_response[0]
response_headers = {}
response_data = None
data_line = None
for line_num in range(1, len(split_response[1:])):
# CRLF represents the start of data
if split_response[line_num] == '':
data_line = line_num + 1
break
else:
# Headers are all split by ':'
header = split_response[line_num].split(':', 1)
if len(header) != 2:
raise errors.TestError(
'Did not receive a response with valid headers',
{
'header_rcvd': str(header),
'function': 'http.HttpResponse.process_response'
})
response_headers[header[0].lower()] = header[1].lstrip()
if 'set-cookie' in response_headers.keys():
try:
cookie = Cookie.SimpleCookie()
cookie.load(response_headers['set-cookie'])
except Cookie.CookieError as err:
raise errors.TestError(
'Error processing the cookie content into a SimpleCookie',
{
'msg': str(err),
'set_cookie': str(response_headers['set-cookie']),
'function': 'http.HttpResponse.process_response'
})
# if the check_for_cookie is invalid then we don't save it
if self.check_for_cookie(cookie) is False:
raise errors.TestError(
'An invalid cookie was specified',
{
'set_cookie': str(response_headers['set-cookie']),
'function': 'http.HttpResponse.process_response'
})
else:
self.cookiejar.append((cookie, self.dest_addr))
if data_line is not None and data_line < len(split_response):
response_data = self.CRLF.join(split_response[data_line:])
# if the output headers say there is encoding
if 'content-encoding' in response_headers.keys():
response_data = self.parse_content_encoding(
response_headers, response_data)
if len(response_line.split(' ', 2)) != 3:
raise errors.TestError(
'The HTTP response line returned the wrong args',
{
'response_line': str(response_line),
'function': 'http.HttpResponse.process_response'
})
try:
self.status = int(response_line.split(' ', 2)[1])
except ValueError:
raise errors.TestError(
'The status num of the response line isn\'t convertable',
{
'msg': 'This may be an HTTP 1.0 \'Simple Req\\Res\', it \
doesn\'t have HTTP headers and FTW will not parse these',
'response_line': str(response_line),
'function': 'http.HttpResponse.process_response'
})
self.status_msg = response_line.split(' ', 2)[2]
self.version = response_line.split(' ', 2)[0]
self.response_line = response_line
self.headers = response_headers
self.data = response_data | [
"def",
"process_response",
"(",
"self",
")",
":",
"split_response",
"=",
"self",
".",
"response",
".",
"split",
"(",
"self",
".",
"CRLF",
")",
"response_line",
"=",
"split_response",
"[",
"0",
"]",
"response_headers",
"=",
"{",
"}",
"response_data",
"=",
"... | Parses an HTTP response after an HTTP request is sent | [
"Parses",
"an",
"HTTP",
"response",
"after",
"an",
"HTTP",
"request",
"is",
"sent"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/http.py#L142-L218 | train | 213,534 |
CRS-support/ftw | ftw/http.py | HttpUA.send_request | def send_request(self, http_request):
"""
Send a request and get response
"""
self.request_object = http_request
self.build_socket()
self.build_request()
try:
self.sock.send(self.request)
except socket.error as err:
raise errors.TestError(
'We were unable to send the request to the socket',
{
'msg': err,
'function': 'http.HttpUA.send_request'
})
finally:
self.get_response() | python | def send_request(self, http_request):
"""
Send a request and get response
"""
self.request_object = http_request
self.build_socket()
self.build_request()
try:
self.sock.send(self.request)
except socket.error as err:
raise errors.TestError(
'We were unable to send the request to the socket',
{
'msg': err,
'function': 'http.HttpUA.send_request'
})
finally:
self.get_response() | [
"def",
"send_request",
"(",
"self",
",",
"http_request",
")",
":",
"self",
".",
"request_object",
"=",
"http_request",
"self",
".",
"build_socket",
"(",
")",
"self",
".",
"build_request",
"(",
")",
"try",
":",
"self",
".",
"sock",
".",
"send",
"(",
"self... | Send a request and get response | [
"Send",
"a",
"request",
"and",
"get",
"response"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/http.py#L241-L258 | train | 213,535 |
CRS-support/ftw | ftw/http.py | HttpUA.build_socket | def build_socket(self):
"""
Generate either an HTTPS or HTTP socket
"""
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.SOCKET_TIMEOUT)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Check if TLS
if self.request_object.protocol == 'https':
self.sock = ssl.wrap_socket(self.sock, ciphers=self.CIPHERS)
self.sock.connect(
(self.request_object.dest_addr, self.request_object.port))
except socket.error as msg:
raise errors.TestError(
'Failed to connect to server',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'message': msg,
'function': 'http.HttpUA.build_socket'
}) | python | def build_socket(self):
"""
Generate either an HTTPS or HTTP socket
"""
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(self.SOCKET_TIMEOUT)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Check if TLS
if self.request_object.protocol == 'https':
self.sock = ssl.wrap_socket(self.sock, ciphers=self.CIPHERS)
self.sock.connect(
(self.request_object.dest_addr, self.request_object.port))
except socket.error as msg:
raise errors.TestError(
'Failed to connect to server',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'message': msg,
'function': 'http.HttpUA.build_socket'
}) | [
"def",
"build_socket",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"self",
".",
"sock",
".",
"settimeout",
"(",
"self",
".",
"SOCKET_TIMEOU... | Generate either an HTTPS or HTTP socket | [
"Generate",
"either",
"an",
"HTTPS",
"or",
"HTTP",
"socket"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/http.py#L260-L282 | train | 213,536 |
CRS-support/ftw | ftw/http.py | HttpUA.find_cookie | def find_cookie(self):
"""
Find a list of all cookies for a given domain
"""
return_cookies = []
origin_domain = self.request_object.dest_addr
for cookie in self.cookiejar:
for cookie_morsals in cookie[0].values():
cover_domain = cookie_morsals['domain']
if cover_domain == '':
if origin_domain == cookie[1]:
return_cookies.append(cookie[0])
else:
# Domain match algorithm
bvalue = cover_domain.lower()
hdn = origin_domain.lower()
nend = hdn.find(bvalue)
if nend is not False:
return_cookies.append(cookie[0])
return return_cookies | python | def find_cookie(self):
"""
Find a list of all cookies for a given domain
"""
return_cookies = []
origin_domain = self.request_object.dest_addr
for cookie in self.cookiejar:
for cookie_morsals in cookie[0].values():
cover_domain = cookie_morsals['domain']
if cover_domain == '':
if origin_domain == cookie[1]:
return_cookies.append(cookie[0])
else:
# Domain match algorithm
bvalue = cover_domain.lower()
hdn = origin_domain.lower()
nend = hdn.find(bvalue)
if nend is not False:
return_cookies.append(cookie[0])
return return_cookies | [
"def",
"find_cookie",
"(",
"self",
")",
":",
"return_cookies",
"=",
"[",
"]",
"origin_domain",
"=",
"self",
".",
"request_object",
".",
"dest_addr",
"for",
"cookie",
"in",
"self",
".",
"cookiejar",
":",
"for",
"cookie_morsals",
"in",
"cookie",
"[",
"0",
"]... | Find a list of all cookies for a given domain | [
"Find",
"a",
"list",
"of",
"all",
"cookies",
"for",
"a",
"given",
"domain"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/http.py#L284-L303 | train | 213,537 |
CRS-support/ftw | ftw/http.py | HttpUA.get_response | def get_response(self):
"""
Get the response from the socket
"""
self.sock.setblocking(0)
our_data = []
# Beginning time
begin = time.time()
while True:
# If we have data then if we're passed the timeout break
if our_data and time.time() - begin > self.HTTP_TIMEOUT:
break
# If we're dataless wait just a bit
elif time.time() - begin > self.HTTP_TIMEOUT * 2:
break
# Recv data
try:
data = self.sock.recv(self.RECEIVE_BYTES)
if data:
our_data.append(data)
begin = time.time()
else:
# Sleep for sometime to indicate a gap
time.sleep(self.HTTP_TIMEOUT)
except socket.error as err:
# Check if we got a timeout
if err.errno == errno.EAGAIN:
pass
# SSL will return SSLWantRead instead of EAGAIN
elif sys.platform == 'win32' and \
err.errno == errno.WSAEWOULDBLOCK:
pass
elif (self.request_object.protocol == 'https' and
err[0] == ssl.SSL_ERROR_WANT_READ):
continue
# If we didn't it's an error
else:
raise errors.TestError(
'Failed to connect to server',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'message': err,
'function': 'http.HttpUA.get_response'
})
if ''.join(our_data) == '':
raise errors.TestError(
'No response from server. Request likely timed out.',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'msg': 'Please send the request and check Wireshark',
'function': 'http.HttpUA.get_response'
})
self.response_object = HttpResponse(''.join(our_data), self)
try:
self.sock.shutdown(1)
self.sock.close()
except socket.error as err:
raise errors.TestError(
'We were unable to close the socket as expected.',
{
'msg': err,
'function': 'http.HttpUA.get_response'
}) | python | def get_response(self):
"""
Get the response from the socket
"""
self.sock.setblocking(0)
our_data = []
# Beginning time
begin = time.time()
while True:
# If we have data then if we're passed the timeout break
if our_data and time.time() - begin > self.HTTP_TIMEOUT:
break
# If we're dataless wait just a bit
elif time.time() - begin > self.HTTP_TIMEOUT * 2:
break
# Recv data
try:
data = self.sock.recv(self.RECEIVE_BYTES)
if data:
our_data.append(data)
begin = time.time()
else:
# Sleep for sometime to indicate a gap
time.sleep(self.HTTP_TIMEOUT)
except socket.error as err:
# Check if we got a timeout
if err.errno == errno.EAGAIN:
pass
# SSL will return SSLWantRead instead of EAGAIN
elif sys.platform == 'win32' and \
err.errno == errno.WSAEWOULDBLOCK:
pass
elif (self.request_object.protocol == 'https' and
err[0] == ssl.SSL_ERROR_WANT_READ):
continue
# If we didn't it's an error
else:
raise errors.TestError(
'Failed to connect to server',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'message': err,
'function': 'http.HttpUA.get_response'
})
if ''.join(our_data) == '':
raise errors.TestError(
'No response from server. Request likely timed out.',
{
'host': self.request_object.dest_addr,
'port': self.request_object.port,
'proto': self.request_object.protocol,
'msg': 'Please send the request and check Wireshark',
'function': 'http.HttpUA.get_response'
})
self.response_object = HttpResponse(''.join(our_data), self)
try:
self.sock.shutdown(1)
self.sock.close()
except socket.error as err:
raise errors.TestError(
'We were unable to close the socket as expected.',
{
'msg': err,
'function': 'http.HttpUA.get_response'
}) | [
"def",
"get_response",
"(",
"self",
")",
":",
"self",
".",
"sock",
".",
"setblocking",
"(",
"0",
")",
"our_data",
"=",
"[",
"]",
"# Beginning time",
"begin",
"=",
"time",
".",
"time",
"(",
")",
"while",
"True",
":",
"# If we have data then if we're passed th... | Get the response from the socket | [
"Get",
"the",
"response",
"from",
"the",
"socket"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/http.py#L410-L476 | train | 213,538 |
def process_regex(self, key):
    """
    Extract the value of key from dictionary if available
    and process it as a python regex
    """
    # Missing keys simply yield no pattern.
    if key not in self.output_dict:
        return None
    return re.compile(self.output_dict[key])
"""
Extract the value of key from dictionary if available
and process it as a python regex
"""
return re.compile(self.output_dict[key]) if \
key in self.output_dict else None | [
"def",
"process_regex",
"(",
"self",
",",
"key",
")",
":",
"return",
"re",
".",
"compile",
"(",
"self",
".",
"output_dict",
"[",
"key",
"]",
")",
"if",
"key",
"in",
"self",
".",
"output_dict",
"else",
"None"
] | Extract the value of key from dictionary if available
and process it as a python regex | [
"Extract",
"the",
"value",
"of",
"key",
"from",
"dictionary",
"if",
"available",
"and",
"process",
"it",
"as",
"a",
"python",
"regex"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/ruleset.py#L70-L76 | train | 213,539 |
def instantiate_database(sqlite_file='ftwj.sqlite'):
    """
    Create journal database for FTW runs.

    Args:
        sqlite_file (str): path of the sqlite database file to create.

    Creates a single ``ftw`` table holding one row per executed test stage.
    """
    # Column definitions kept as data so schema stays in one obvious place.
    columns = [
        ('rule_id', 'INTEGER'),
        ('test_id', 'STRING'),
        ('time_start', 'TEXT'),
        ('time_end', 'TEXT'),
        ('response_blob', 'TEXT'),
        ('status_code', 'INTEGER'),
        ('stage', 'INTEGER'),
    ]
    column_sql = ','.join('%s %s' % (name, ctype) for name, ctype in columns)
    query = 'CREATE TABLE ftw(%s)' % column_sql
    conn = sqlite3.connect(sqlite_file)
    try:
        # The connection context manager commits on success and rolls back
        # on error; previously a failing CREATE leaked the open connection.
        with conn:
            conn.execute(query)
    finally:
        # sqlite3's context manager does NOT close the connection.
        conn.close()
"""
Create journal database for FTW runs
"""
table_name = 'ftw'
col1 = 'rule_id'
col1_t = 'INTEGER'
col2 = 'test_id'
col2_t = 'STRING'
col3 = 'time_start'
col3_t = 'TEXT'
col4 = 'time_end'
col4_t = 'TEXT'
col5 = 'response_blob'
col5_t = 'TEXT'
col6 = 'status_code'
col6_t = 'INTEGER'
col7 = 'stage'
col7_t = 'INTEGER'
conn = sqlite3.connect(sqlite_file)
cur = conn.cursor()
q = 'CREATE TABLE {tn}({col1} {col1_t},{col2} {col2_t},{col3} {col3_t},{col4} {col4_t},{col5} {col5_t},{col6} {col6_t},{col7} {col7_t})'.format(
tn=table_name,
col1=col1, col1_t=col1_t,
col2=col2, col2_t=col2_t,
col3=col3, col3_t=col3_t,
col4=col4, col4_t=col4_t,
col5=col5, col5_t=col5_t,
col6=col6, col6_t=col6_t,
col7=col7, col7_t=col7_t)
cur.execute(q)
conn.commit()
conn.close() | [
"def",
"instantiate_database",
"(",
"sqlite_file",
"=",
"'ftwj.sqlite'",
")",
":",
"table_name",
"=",
"'ftw'",
"col1",
"=",
"'rule_id'",
"col1_t",
"=",
"'INTEGER'",
"col2",
"=",
"'test_id'",
"col2_t",
"=",
"'STRING'",
"col3",
"=",
"'time_start'",
"col3_t",
"=",
... | Create journal database for FTW runs | [
"Create",
"journal",
"database",
"for",
"FTW",
"runs"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/util.py#L18-L51 | train | 213,540 |
def get_rulesets(ruledir, recurse):
    """
    List of ruleset objects extracted from the yaml directory.

    Args:
        ruledir (str): a directory of ``.yaml`` files or a single yaml file.
        recurse (bool): when ``ruledir`` is a directory, also walk
            subdirectories for yaml files.

    Returns:
        list: one ``ruleset.Ruleset`` per yaml file found.

    Raises:
        IOError: if ``ruledir`` is neither a directory nor a file.
    """
    if os.path.isdir(ruledir) and recurse:
        # Walk the whole tree picking up every .yaml file.
        yaml_files = [y for x in os.walk(ruledir)
                      for y in glob(os.path.join(x[0], '*.yaml'))]
    elif os.path.isdir(ruledir):
        yaml_files = get_files(ruledir, 'yaml')
    elif os.path.isfile(ruledir):
        yaml_files = [ruledir]
    else:
        # Previously this path fell through and crashed with an opaque
        # NameError on yaml_files; fail with a clear message instead.
        raise IOError('No such file or directory: %r' % ruledir)
    extracted_files = extract_yaml(yaml_files)
    return [ruleset.Ruleset(extracted_yaml)
            for extracted_yaml in extracted_files]
"""
List of ruleset objects extracted from the yaml directory
"""
if os.path.isdir(ruledir) and recurse:
yaml_files = [y for x in os.walk(ruledir) for y in glob(os.path.join(x[0], '*.yaml'))]
elif os.path.isdir(ruledir) and not recurse:
yaml_files = get_files(ruledir, 'yaml')
elif os.path.isfile(ruledir):
yaml_files = [ruledir]
extracted_files = extract_yaml(yaml_files)
rulesets = []
for extracted_yaml in extracted_files:
rulesets.append(ruleset.Ruleset(extracted_yaml))
return rulesets | [
"def",
"get_rulesets",
"(",
"ruledir",
",",
"recurse",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"ruledir",
")",
"and",
"recurse",
":",
"yaml_files",
"=",
"[",
"y",
"for",
"x",
"in",
"os",
".",
"walk",
"(",
"ruledir",
")",
"for",
"y",
... | List of ruleset objects extracted from the yaml directory | [
"List",
"of",
"ruleset",
"objects",
"extracted",
"from",
"the",
"yaml",
"directory"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/util.py#L53-L67 | train | 213,541 |
def extract_yaml(yaml_files):
    """
    Take a list of yaml_files and load them to return back
    to the testing program.

    Args:
        yaml_files (list): paths of yaml files to parse.

    Returns:
        list: the parsed content of each file, in input order.

    Raises:
        IOError: if a file cannot be read.
        yaml.YAMLError: if a file is not valid yaml.
    """
    loaded_yaml = []
    for yaml_file in yaml_files:
        try:
            with open(yaml_file, 'r') as fd:
                loaded_yaml.append(yaml.safe_load(fd))
        except IOError:
            print('Error reading file', yaml_file)
            # Bare raise preserves the original exception and traceback
            # (``raise e`` re-anchors the traceback at this line).
            raise
        except yaml.YAMLError:
            print('Error parsing file', yaml_file)
            raise
        except Exception:
            print('General error')
            raise
    return loaded_yaml
"""
Take a list of yaml_files and load them to return back
to the testing program
"""
loaded_yaml = []
for yaml_file in yaml_files:
try:
with open(yaml_file, 'r') as fd:
loaded_yaml.append(yaml.safe_load(fd))
except IOError as e:
print('Error reading file', yaml_file)
raise e
except yaml.YAMLError as e:
print('Error parsing file', yaml_file)
raise e
except Exception as e:
print('General error')
raise e
return loaded_yaml | [
"def",
"extract_yaml",
"(",
"yaml_files",
")",
":",
"loaded_yaml",
"=",
"[",
"]",
"for",
"yaml_file",
"in",
"yaml_files",
":",
"try",
":",
"with",
"open",
"(",
"yaml_file",
",",
"'r'",
")",
"as",
"fd",
":",
"loaded_yaml",
".",
"append",
"(",
"yaml",
".... | Take a list of yaml_files and load them to return back
to the testing program | [
"Take",
"a",
"list",
"of",
"yaml_files",
"and",
"load",
"them",
"to",
"return",
"back",
"to",
"the",
"testing",
"program"
] | 1bbfd9b702e7e65532c1fd52bc82960556cefae5 | https://github.com/CRS-support/ftw/blob/1bbfd9b702e7e65532c1fd52bc82960556cefae5/ftw/util.py#L77-L96 | train | 213,542 |
def deserialize(cls, raw_bytes):
    """
    Deserializes the given raw bytes into an instance.

    Since this is a subclass of ``Part`` but a top-level one (i.e. no other
    subclass of ``Part`` would have a ``Response`` as a part) this merely
    has to parse the raw bytes and discard the resulting offset.
    """
    parsed, _offset = cls.parse(raw_bytes, offset=0)
    return parsed
"""
Deserializes the given raw bytes into an instance.
Since this is a subclass of ``Part`` but a top-level one (i.e. no other
subclass of ``Part`` would have a ``Response`` as a part) this merely
has to parse the raw bytes and discard the resulting offset.
"""
instance, _ = cls.parse(raw_bytes, offset=0)
return instance | [
"def",
"deserialize",
"(",
"cls",
",",
"raw_bytes",
")",
":",
"instance",
",",
"_",
"=",
"cls",
".",
"parse",
"(",
"raw_bytes",
",",
"offset",
"=",
"0",
")",
"return",
"instance"
] | Deserializes the given raw bytes into an instance.
Since this is a subclass of ``Part`` but a top-level one (i.e. no other
subclass of ``Part`` would have a ``Response`` as a part) this merely
has to parse the raw bytes and discard the resulting offset. | [
"Deserializes",
"the",
"given",
"raw",
"bytes",
"into",
"an",
"instance",
"."
] | 96d2f543de248c6d993b5bfe6621167dd1eb8223 | https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/response.py#L27-L37 | train | 213,543 |
def render(self, parts=None):
    """
    Returns a two-element tuple with the ``struct`` format and values.

    Iterates over the applicable sub-parts and calls `render()` on them,
    accumulating the format string and values.

    Optionally takes a subset of parts to render, default behavior is to
    render all sub-parts belonging to the class.
    """
    selected = parts or self.parts

    formats = []
    values = []
    for attr_name, attr_class in selected:
        # Primitives are wrapped around the raw attribute value;
        # composite parts are stored on the instance already rendered-able.
        if issubclass(attr_class, Primitive):
            sub_part = attr_class(getattr(self, attr_name, None))
        else:
            sub_part = getattr(self, attr_name, None)

        sub_fmt, sub_values = sub_part.render()
        formats.extend(sub_fmt)
        values.extend(sub_values)

    return "".join(formats), values
"""
Returns a two-element tuple with the ``struct`` format and values.
Iterates over the applicable sub-parts and calls `render()` on them,
accumulating the format string and values.
Optionally takes a subset of parts to render, default behavior is to
render all sub-parts belonging to the class.
"""
if not parts:
parts = self.parts
fmt = []
data = []
for name, part_class in parts:
if issubclass(part_class, Primitive):
part = part_class(getattr(self, name, None))
else:
part = getattr(self, name, None)
part_format, part_data = part.render()
fmt.extend(part_format)
data.extend(part_data)
return "".join(fmt), data | [
"def",
"render",
"(",
"self",
",",
"parts",
"=",
"None",
")",
":",
"if",
"not",
"parts",
":",
"parts",
"=",
"self",
".",
"parts",
"fmt",
"=",
"[",
"]",
"data",
"=",
"[",
"]",
"for",
"name",
",",
"part_class",
"in",
"parts",
":",
"if",
"issubclass... | Returns a two-element tuple with the ``struct`` format and values.
Iterates over the applicable sub-parts and calls `render()` on them,
accumulating the format string and values.
Optionally takes a subset of parts to render, default behavior is to
render all sub-parts belonging to the class. | [
"Returns",
"a",
"two",
"-",
"element",
"tuple",
"with",
"the",
"struct",
"format",
"and",
"values",
"."
] | 96d2f543de248c6d993b5bfe6621167dd1eb8223 | https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/part.py#L29-L56 | train | 213,544 |
def render(self):
    """
    Returns the ``struct`` format and list of the size and value.

    The format is derived from the size primitive and the length of the
    resulting encoded value (e.g. the format for a string of 'foo' ends
    up as 'h3s').

    .. note ::
      The value is expected to be string-able (wrapped in ``str()``) and is
      then encoded as UTF-8.
    """
    prefix_fmt = self.size_primitive.fmt

    # A missing value is encoded as a size of -1 with no payload.
    if self.value is None:
        return prefix_fmt, [-1]

    encoded = self.render_value(self.value)
    length = len(encoded)

    return "%s%ds" % (prefix_fmt, length), [length, encoded]
"""
Returns the ``struct`` format and list of the size and value.
The format is derived from the size primitive and the length of the
resulting encoded value (e.g. the format for a string of 'foo' ends
up as 'h3s'.
.. note ::
The value is expected to be string-able (wrapped in ``str()``) and is
then encoded as UTF-8.
"""
size_format = self.size_primitive.fmt
if self.value is None:
return size_format, [-1]
value = self.render_value(self.value)
size = len(value)
fmt = "%s%ds" % (size_format, size)
return fmt, [size, value] | [
"def",
"render",
"(",
"self",
")",
":",
"size_format",
"=",
"self",
".",
"size_primitive",
".",
"fmt",
"if",
"self",
".",
"value",
"is",
"None",
":",
"return",
"size_format",
",",
"[",
"-",
"1",
"]",
"value",
"=",
"self",
".",
"render_value",
"(",
"s... | Returns the ``struct`` format and list of the size and value.
The format is derived from the size primitive and the length of the
resulting encoded value (e.g. the format for a string of 'foo' ends
up as 'h3s'.
.. note ::
The value is expected to be string-able (wrapped in ``str()``) and is
then encoded as UTF-8. | [
"Returns",
"the",
"struct",
"format",
"and",
"list",
"of",
"the",
"size",
"and",
"value",
"."
] | 96d2f543de248c6d993b5bfe6621167dd1eb8223 | https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/primitives.py#L65-L88 | train | 213,545 |
def of(cls, part_class):
    """
    Creates a new class with the ``item_class`` attribute properly set.
    """
    new_name = "VectorOf%s" % part_class.__name__
    # Clone this class (same bases, shallow copy of the class dict) and
    # pin the element type on the clone.
    subclass = type(new_name, cls.__bases__, dict(cls.__dict__))
    subclass.item_class = part_class
    return subclass
"""
Creates a new class with the ``item_class`` attribute properly set.
"""
copy = type(
"VectorOf%s" % part_class.__name__,
cls.__bases__, dict(cls.__dict__)
)
copy.item_class = part_class
return copy | [
"def",
"of",
"(",
"cls",
",",
"part_class",
")",
":",
"copy",
"=",
"type",
"(",
"\"VectorOf%s\"",
"%",
"part_class",
".",
"__name__",
",",
"cls",
".",
"__bases__",
",",
"dict",
"(",
"cls",
".",
"__dict__",
")",
")",
"copy",
".",
"item_class",
"=",
"p... | Creates a new class with the ``item_class`` attribute properly set. | [
"Creates",
"a",
"new",
"class",
"with",
"the",
"item_class",
"attribute",
"properly",
"set",
"."
] | 96d2f543de248c6d993b5bfe6621167dd1eb8223 | https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/primitives.py#L199-L209 | train | 213,546 |
def render(self):
    """
    Creates a composite ``struct`` format and the data to render with it.

    The format and data are prefixed with a 32-bit integer denoting the
    number of elements, after which each of the items in the array value
    are ``render()``-ed and added to the format and data as well.
    """
    items = self.value if self.value is not None else []

    # Leading 32-bit int carries the element count.
    formats = [Int.fmt]
    values = [len(items)]

    for element in items:
        if issubclass(self.item_class, Primitive):
            part = self.item_class(element)
        else:
            part = element
        part_fmt, part_values = part.render()
        formats.extend(part_fmt)
        values.extend(part_values)

    return "".join(formats), values
"""
Creates a composite ``struct`` format and the data to render with it.
The format and data are prefixed with a 32-bit integer denoting the
number of elements, after which each of the items in the array value
are ``render()``-ed and added to the format and data as well.
"""
value = self.value
if value is None:
value = []
fmt = [Int.fmt]
data = [len(value)]
for item_value in value:
if issubclass(self.item_class, Primitive):
item = self.item_class(item_value)
else:
item = item_value
item_format, item_data = item.render()
fmt.extend(item_format)
data.extend(item_data)
return "".join(fmt), data | [
"def",
"render",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"value",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"[",
"]",
"fmt",
"=",
"[",
"Int",
".",
"fmt",
"]",
"data",
"=",
"[",
"len",
"(",
"value",
")",
"]",
"for",
"item_value",... | Creates a composite ``struct`` format and the data to render with it.
The format and data are prefixed with a 32-bit integer denoting the
number of elements, after which each of the items in the array value
are ``render()``-ed and added to the format and data as well. | [
"Creates",
"a",
"composite",
"struct",
"format",
"and",
"the",
"data",
"to",
"render",
"with",
"it",
"."
] | 96d2f543de248c6d993b5bfe6621167dd1eb8223 | https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/primitives.py#L211-L236 | train | 213,547 |
def parse(cls, buff, offset):
    """
    Parses a raw buffer at offset and returns the resulting array value.

    Starts off by `parse()`-ing the 32-bit element count, followed by
    parsing items out of the buffer "count" times.
    """
    count, cursor = Int.parse(buff, offset)

    items = []
    for _ in range(count):
        # Each parse advances the cursor past the item it consumed.
        item, cursor = cls.item_class.parse(buff, cursor)
        items.append(item)

    return items, cursor
"""
Parses a raw buffer at offset and returns the resulting array value.
Starts off by `parse()`-ing the 32-bit element count, followed by
parsing items out of the buffer "count" times.
"""
count, offset = Int.parse(buff, offset)
values = []
for _ in range(count):
value, new_offset = cls.item_class.parse(buff, offset)
values.append(value)
offset = new_offset
return values, offset | [
"def",
"parse",
"(",
"cls",
",",
"buff",
",",
"offset",
")",
":",
"count",
",",
"offset",
"=",
"Int",
".",
"parse",
"(",
"buff",
",",
"offset",
")",
"values",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"count",
")",
":",
"value",
",",
"new_... | Parses a raw buffer at offset and returns the resulting array value.
Starts off by `parse()`-ing the 32-bit element count, followed by
parsing items out of the buffer "count" times. | [
"Parses",
"a",
"raw",
"buffer",
"at",
"offset",
"and",
"returns",
"the",
"resulting",
"array",
"value",
"."
] | 96d2f543de248c6d993b5bfe6621167dd1eb8223 | https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/protocol/primitives.py#L239-L255 | train | 213,548 |
def round_robin(members, items):
    """
    Default allocator with a round robin approach.

    In this algorithm, each member of the group is cycled over and given an
    item until there are no items left. This assumes roughly equal capacity
    for each member and aims for even distribution of item counts.
    """
    buckets = collections.defaultdict(set)
    member_stream = itertools.cycle(members)
    # zip terminates with items; an empty member list yields no allocation.
    for member, task in zip(member_stream, items):
        buckets[member].add(task)
    return buckets
"""
Default allocator with a round robin approach.
In this algorithm, each member of the group is cycled over and given an
item until there are no items left. This assumes roughly equal capacity
for each member and aims for even distribution of item counts.
"""
allocation = collections.defaultdict(set)
for member, item in zip(itertools.cycle(members), items):
allocation[member].add(item)
return allocation | [
"def",
"round_robin",
"(",
"members",
",",
"items",
")",
":",
"allocation",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"for",
"member",
",",
"item",
"in",
"zip",
"(",
"itertools",
".",
"cycle",
"(",
"members",
")",
",",
"items",
")",
":",... | Default allocator with a round robin approach.
In this algorithm, each member of the group is cycled over and given an
item until there are no items left. This assumes roughly equal capacity
for each member and aims for even distribution of item counts. | [
"Default",
"allocator",
"with",
"a",
"round",
"robin",
"approach",
"."
] | 96d2f543de248c6d993b5bfe6621167dd1eb8223 | https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/recipes/allocator.py#L121-L134 | train | 213,549 |
def xfrange(start, stop, step=1, maxSize=-1):
    """
    Returns a generator that yields the frames from start to stop, inclusive.

    In other words it adds or subtracts a frame, as necessary, to return the
    stop value as well, if the stepped range would touch that value.

    Args:
        start (int):
        stop (int):
        step (int): Note that the sign will be ignored
        maxSize (int):

    Returns:
        generator:

    Raises:
        :class:`fileseq.exceptions.MaxSizeException`: if size is exceeded
    """
    # Normalize the step direction and push stop one step out so the
    # original stop value is included in the range.
    if start <= stop:
        stop += 1
        step = abs(step)
    else:
        stop -= 1
        step = -abs(step)

    if maxSize >= 0:
        size = lenRange(start, stop, step)
        if size > maxSize:
            raise exceptions.MaxSizeException(
                "Size %d > %s (MAX_FRAME_SIZE)" % (size, maxSize))

    # Wrap the xrange in a generator expression so callers always receive
    # a proper Generator object.
    return (frame for frame in xrange(start, stop, step))
"""
Returns a generator that yields the frames from start to stop, inclusive.
In other words it adds or subtracts a frame, as necessary, to return the
stop value as well, if the stepped range would touch that value.
Args:
start (int):
stop (int):
step (int): Note that the sign will be ignored
maxSize (int):
Returns:
generator:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: if size is exceeded
"""
if start <= stop:
stop, step = stop + 1, abs(step)
else:
stop, step = stop - 1, -abs(step)
if maxSize >= 0:
size = lenRange(start, stop, step)
if size > maxSize:
raise exceptions.MaxSizeException(
"Size %d > %s (MAX_FRAME_SIZE)" % (size, maxSize))
# because an xrange is an odd object all its own, we wrap it in a
# generator expression to get a proper Generator
return (f for f in xrange(start, stop, step)) | [
"def",
"xfrange",
"(",
"start",
",",
"stop",
",",
"step",
"=",
"1",
",",
"maxSize",
"=",
"-",
"1",
")",
":",
"if",
"start",
"<=",
"stop",
":",
"stop",
",",
"step",
"=",
"stop",
"+",
"1",
",",
"abs",
"(",
"step",
")",
"else",
":",
"stop",
",",... | Returns a generator that yields the frames from start to stop, inclusive.
In other words it adds or subtracts a frame, as necessary, to return the
stop value as well, if the stepped range would touch that value.
Args:
start (int):
stop (int):
step (int): Note that the sign will be ignored
maxSize (int):
Returns:
generator:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: if size is exceeded | [
"Returns",
"a",
"generator",
"that",
"yields",
"the",
"frames",
"from",
"start",
"to",
"stop",
"inclusive",
".",
"In",
"other",
"words",
"it",
"adds",
"or",
"subtracts",
"a",
"frame",
"as",
"necessary",
"to",
"return",
"the",
"stop",
"value",
"as",
"well",... | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/utils.py#L63-L94 | train | 213,550 |
def unique(seen, *iterables):
    """
    Get the unique items in iterables while preserving order. Note that this
    mutates the seen set provided only when the returned generator is used.

    Args:
        seen (set): either an empty set, or the set of things already seen
        *iterables: one or more iterable lists to chain together

    Returns:
        generator:
    """
    combined = chain(*iterables)

    def _dedupe():
        # ``seen`` is mutated lazily, as the generator is consumed.
        for item in combined:
            if item not in seen:
                seen.add(item)
                yield item

    return _dedupe()
"""
Get the unique items in iterables while preserving order. Note that this
mutates the seen set provided only when the returned generator is used.
Args:
seen (set): either an empty set, or the set of things already seen
*iterables: one or more iterable lists to chain together
Returns:
generator:
"""
_add = seen.add
# return a generator of the unique items and the set of the seen items
# the seen set will mutate when the generator is iterated over
return (i for i in chain(*iterables) if i not in seen and not _add(i)) | [
"def",
"unique",
"(",
"seen",
",",
"*",
"iterables",
")",
":",
"_add",
"=",
"seen",
".",
"add",
"# return a generator of the unique items and the set of the seen items",
"# the seen set will mutate when the generator is iterated over",
"return",
"(",
"i",
"for",
"i",
"in",
... | Get the unique items in iterables while preserving order. Note that this
mutates the seen set provided only when the returned generator is used.
Args:
seen (set): either an empty set, or the set of things already seen
*iterables: one or more iterable lists to chain together
Returns:
generator: | [
"Get",
"the",
"unique",
"items",
"in",
"iterables",
"while",
"preserving",
"order",
".",
"Note",
"that",
"this",
"mutates",
"the",
"seen",
"set",
"provided",
"only",
"when",
"the",
"returned",
"generator",
"is",
"used",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/utils.py#L97-L112 | train | 213,551 |
def copy(self):
    """
    Create a deep copy of this sequence.

    Returns:
        :obj:`.FileSequence`:
    """
    # Build an instance of the same class without running __init__,
    # then shallow-copy all attributes across.
    cls = type(self)
    duplicate = cls.__new__(cls)
    duplicate.__dict__ = dict(self.__dict__)

    # The frame set is the one mutable attribute that needs its own copy.
    frames = self._frameSet
    duplicate._frameSet = frames.copy() if frames is not None else None
    return duplicate
"""
Create a deep copy of this sequence
Returns:
:obj:`.FileSequence`:
"""
fs = self.__class__.__new__(self.__class__)
fs.__dict__ = self.__dict__.copy()
fs._frameSet = None
if self._frameSet is not None:
fs._frameSet = self._frameSet.copy()
return fs | [
"def",
"copy",
"(",
"self",
")",
":",
"fs",
"=",
"self",
".",
"__class__",
".",
"__new__",
"(",
"self",
".",
"__class__",
")",
"fs",
".",
"__dict__",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"fs",
".",
"_frameSet",
"=",
"None",
"if",
... | Create a deep copy of this sequence
Returns:
:obj:`.FileSequence`: | [
"Create",
"a",
"deep",
"copy",
"of",
"this",
"sequence"
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L79-L91 | train | 213,552 |
def format(self, template="{basename}{range}{padding}{extension}"):
    """Return the file sequence as a formatted string according to
    the given template.

    Utilizes the python string format syntax. Available keys include:
        * basename - the basename of the sequence.
        * extension - the file extension of the sequence.
        * start - the start frame.
        * end - the end frame.
        * length - the length of the frame range.
        * padding - the detecting amount of padding.
        * inverted - the inverted frame range. (returns "" if none)
        * dirname - the directory name.

    If asking for the inverted range value, and the new inverted range
    exceeded :const:`fileseq.constants.MAX_FRAME_SIZE`, a ``MaxSizeException``
    will be raised.

    Args:
        template (str):

    Returns:
        str:

    Raises:
        :class:`fileseq.exceptions.MaxSizeException`: If frame size exceeds
        :const:`fileseq.constants.MAX_FRAME_SIZE`
    """
    # Computing the inverted range can be expensive when the range is
    # large, so only do it when the template actually asks for it.
    inverted_range = ""
    if "{inverted}" in template:
        inverted_range = self.invertedFrameRange() or ""

    fields = {
        "basename": self.basename(),
        "extension": self.extension(),
        "start": self.start(),
        "end": self.end(),
        "length": len(self),
        "padding": self.padding(),
        "range": self.frameRange() or "",
        "inverted": inverted_range,
        "dirname": self.dirname(),
    }
    return template.format(**fields)
"""Return the file sequence as a formatted string according to
the given template.
Utilizes the python string format syntax. Available keys include:
* basename - the basename of the sequence.
* extension - the file extension of the sequence.
* start - the start frame.
* end - the end frame.
* length - the length of the frame range.
* padding - the detecting amount of padding.
* inverted - the inverted frame range. (returns "" if none)
* dirname - the directory name.
If asking for the inverted range value, and the new inverted range
exceeded :const:`fileseq.constants.MAX_FRAME_SIZE`, a ``MaxSizeException``
will be raised.
Args:
template (str):
Returns:
str:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: If frame size exceeds
:const:`fileseq.constants.MAX_FRAME_SIZE`
"""
# Potentially expensive if inverted range is large
# and user never asked for it in template
inverted = (self.invertedFrameRange() or "") if "{inverted}" in template else ""
return template.format(
basename=self.basename(),
extension=self.extension(), start=self.start(),
end=self.end(), length=len(self),
padding=self.padding(),
range=self.frameRange() or "",
inverted=inverted,
dirname=self.dirname()) | [
"def",
"format",
"(",
"self",
",",
"template",
"=",
"\"{basename}{range}{padding}{extension}\"",
")",
":",
"# Potentially expensive if inverted range is large",
"# and user never asked for it in template",
"inverted",
"=",
"(",
"self",
".",
"invertedFrameRange",
"(",
")",
"or... | Return the file sequence as a formatted string according to
the given template.
Utilizes the python string format syntax. Available keys include:
* basename - the basename of the sequence.
* extension - the file extension of the sequence.
* start - the start frame.
* end - the end frame.
* length - the length of the frame range.
* padding - the detecting amount of padding.
* inverted - the inverted frame range. (returns "" if none)
* dirname - the directory name.
If asking for the inverted range value, and the new inverted range
exceeded :const:`fileseq.constants.MAX_FRAME_SIZE`, a ``MaxSizeException``
will be raised.
Args:
template (str):
Returns:
str:
Raises:
:class:`fileseq.exceptions.MaxSizeException`: If frame size exceeds
:const:`fileseq.constants.MAX_FRAME_SIZE` | [
"Return",
"the",
"file",
"sequence",
"as",
"a",
"formatted",
"string",
"according",
"to",
"the",
"given",
"template",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L93-L132 | train | 213,553 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence.setDirname | def setDirname(self, dirname):
"""
Set a new directory name for the sequence.
Args:
dirname (str): the new directory name
"""
# Make sure the dirname always ends in
# a path separator character
sep = utils._getPathSep(dirname)
if not dirname.endswith(sep):
dirname += sep
self._dir = utils.asString(dirname) | python | def setDirname(self, dirname):
"""
Set a new directory name for the sequence.
Args:
dirname (str): the new directory name
"""
# Make sure the dirname always ends in
# a path separator character
sep = utils._getPathSep(dirname)
if not dirname.endswith(sep):
dirname += sep
self._dir = utils.asString(dirname) | [
"def",
"setDirname",
"(",
"self",
",",
"dirname",
")",
":",
"# Make sure the dirname always ends in",
"# a path separator character",
"sep",
"=",
"utils",
".",
"_getPathSep",
"(",
"dirname",
")",
"if",
"not",
"dirname",
".",
"endswith",
"(",
"sep",
")",
":",
"di... | Set a new directory name for the sequence.
Args:
dirname (str): the new directory name | [
"Set",
"a",
"new",
"directory",
"name",
"for",
"the",
"sequence",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L157-L170 | train | 213,554 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence.setExtension | def setExtension(self, ext):
"""
Set a new file extension for the sequence.
Note:
A leading period will be added if none is provided.
Args:
ext (str): the new file extension
"""
if ext[0] != ".":
ext = "." + ext
self._ext = utils.asString(ext) | python | def setExtension(self, ext):
"""
Set a new file extension for the sequence.
Note:
A leading period will be added if none is provided.
Args:
ext (str): the new file extension
"""
if ext[0] != ".":
ext = "." + ext
self._ext = utils.asString(ext) | [
"def",
"setExtension",
"(",
"self",
",",
"ext",
")",
":",
"if",
"ext",
"[",
"0",
"]",
"!=",
"\".\"",
":",
"ext",
"=",
"\".\"",
"+",
"ext",
"self",
".",
"_ext",
"=",
"utils",
".",
"asString",
"(",
"ext",
")"
] | Set a new file extension for the sequence.
Note:
A leading period will be added if none is provided.
Args:
ext (str): the new file extension | [
"Set",
"a",
"new",
"file",
"extension",
"for",
"the",
"sequence",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L238-L250 | train | 213,555 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence.frame | def frame(self, frame):
"""
Return a path go the given frame in the sequence. Integer or string
digits are treated as a frame number and padding is applied, all other
values are passed though.
Examples:
>>> seq.frame(1)
/foo/bar.0001.exr
>>> seq.frame("#")
/foo/bar.#.exr
Args:
frame (int or str): the desired frame number or a char to pass
through (ie. #)
Returns:
str:
"""
try:
zframe = str(int(frame)).zfill(self._zfill)
except ValueError:
zframe = frame
# There may have been no placeholder for frame IDs in
# the sequence, in which case we don't want to insert
# a frame ID
if self._zfill == 0:
zframe = ""
return "".join((self._dir, self._base, zframe, self._ext)) | python | def frame(self, frame):
"""
Return a path go the given frame in the sequence. Integer or string
digits are treated as a frame number and padding is applied, all other
values are passed though.
Examples:
>>> seq.frame(1)
/foo/bar.0001.exr
>>> seq.frame("#")
/foo/bar.#.exr
Args:
frame (int or str): the desired frame number or a char to pass
through (ie. #)
Returns:
str:
"""
try:
zframe = str(int(frame)).zfill(self._zfill)
except ValueError:
zframe = frame
# There may have been no placeholder for frame IDs in
# the sequence, in which case we don't want to insert
# a frame ID
if self._zfill == 0:
zframe = ""
return "".join((self._dir, self._base, zframe, self._ext)) | [
"def",
"frame",
"(",
"self",
",",
"frame",
")",
":",
"try",
":",
"zframe",
"=",
"str",
"(",
"int",
"(",
"frame",
")",
")",
".",
"zfill",
"(",
"self",
".",
"_zfill",
")",
"except",
"ValueError",
":",
"zframe",
"=",
"frame",
"# There may have been no pla... | Return a path go the given frame in the sequence. Integer or string
digits are treated as a frame number and padding is applied, all other
values are passed though.
Examples:
>>> seq.frame(1)
/foo/bar.0001.exr
>>> seq.frame("#")
/foo/bar.#.exr
Args:
frame (int or str): the desired frame number or a char to pass
through (ie. #)
Returns:
str: | [
"Return",
"a",
"path",
"go",
"the",
"given",
"frame",
"in",
"the",
"sequence",
".",
"Integer",
"or",
"string",
"digits",
"are",
"treated",
"as",
"a",
"frame",
"number",
"and",
"padding",
"is",
"applied",
"all",
"other",
"values",
"are",
"passed",
"though",... | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L334-L365 | train | 213,556 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence.yield_sequences_in_list | def yield_sequences_in_list(paths):
"""
Yield the discrete sequences within paths. This does not try to
determine if the files actually exist on disk, it assumes you already
know that.
Args:
paths (list[str]): a list of paths
Yields:
:obj:`FileSequence`:
"""
seqs = {}
_check = DISK_RE.match
for match in ifilter(None, imap(_check, imap(utils.asString, paths))):
dirname, basename, frame, ext = match.groups()
if not basename and not ext:
continue
key = (dirname, basename, ext)
seqs.setdefault(key, set())
if frame:
seqs[key].add(frame)
for (dirname, basename, ext), frames in seqs.iteritems():
# build the FileSequence behind the scenes, rather than dupe work
seq = FileSequence.__new__(FileSequence)
seq._dir = dirname or ''
seq._base = basename or ''
seq._ext = ext or ''
if frames:
seq._frameSet = FrameSet(set(imap(int, frames))) if frames else None
seq._pad = FileSequence.getPaddingChars(min(imap(len, frames)))
else:
seq._frameSet = None
seq._pad = ''
seq.__init__(str(seq))
yield seq | python | def yield_sequences_in_list(paths):
"""
Yield the discrete sequences within paths. This does not try to
determine if the files actually exist on disk, it assumes you already
know that.
Args:
paths (list[str]): a list of paths
Yields:
:obj:`FileSequence`:
"""
seqs = {}
_check = DISK_RE.match
for match in ifilter(None, imap(_check, imap(utils.asString, paths))):
dirname, basename, frame, ext = match.groups()
if not basename and not ext:
continue
key = (dirname, basename, ext)
seqs.setdefault(key, set())
if frame:
seqs[key].add(frame)
for (dirname, basename, ext), frames in seqs.iteritems():
# build the FileSequence behind the scenes, rather than dupe work
seq = FileSequence.__new__(FileSequence)
seq._dir = dirname or ''
seq._base = basename or ''
seq._ext = ext or ''
if frames:
seq._frameSet = FrameSet(set(imap(int, frames))) if frames else None
seq._pad = FileSequence.getPaddingChars(min(imap(len, frames)))
else:
seq._frameSet = None
seq._pad = ''
seq.__init__(str(seq))
yield seq | [
"def",
"yield_sequences_in_list",
"(",
"paths",
")",
":",
"seqs",
"=",
"{",
"}",
"_check",
"=",
"DISK_RE",
".",
"match",
"for",
"match",
"in",
"ifilter",
"(",
"None",
",",
"imap",
"(",
"_check",
",",
"imap",
"(",
"utils",
".",
"asString",
",",
"paths",... | Yield the discrete sequences within paths. This does not try to
determine if the files actually exist on disk, it assumes you already
know that.
Args:
paths (list[str]): a list of paths
Yields:
:obj:`FileSequence`: | [
"Yield",
"the",
"discrete",
"sequences",
"within",
"paths",
".",
"This",
"does",
"not",
"try",
"to",
"determine",
"if",
"the",
"files",
"actually",
"exist",
"on",
"disk",
"it",
"assumes",
"you",
"already",
"know",
"that",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L470-L507 | train | 213,557 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence.findSequencesOnDisk | def findSequencesOnDisk(cls, pattern, include_hidden=False, strictPadding=False):
"""
Yield the sequences found in the given directory.
Examples:
>>> findSequencesOnDisk('/path/to/files')
The `pattern` can also specify glob-like shell wildcards including the following:
* ``?`` - 1 wildcard character
* ``*`` - 1 or more wildcard character
* ``{foo,bar}`` - either 'foo' or 'bar'
Exact frame ranges are not considered, and padding characters are converted to
wildcards (``#`` or ``@``)
Examples:
>>> findSequencesOnDisk('/path/to/files/image_stereo_{left,right}.#.jpg')
>>> findSequencesOnDisk('/path/to/files/imag?_*_{left,right}.@@@.jpg', strictPadding=True)
Args:
pattern (str): directory to scan, or pattern to filter in directory
include_hidden (bool): if true, show .hidden files as well
strictPadding (bool): if True, ignore files with padding length different from pattern
Returns:
list:
"""
# reserve some functions we're going to need quick access to
_not_hidden = lambda f: not f.startswith('.')
_match_pattern = None
_filter_padding = None
_join = os.path.join
seq = None
dirpath = pattern
# Support the pattern defining a filter for the files
# in the existing directory
if not os.path.isdir(pattern):
dirpath, filepat = os.path.split(pattern)
if not os.path.isdir(dirpath):
return []
# Start building a regex for filtering files
seq = cls(filepat)
patt = seq.basename().replace('.', r'\.')
if seq.padding():
patt += '\d+'
if seq.extension():
patt += seq.extension()
# Convert braces groups into regex capture groups
view = bytearray(patt)
matches = re.finditer(r'{(.*?)(?:,(.*?))*}', patt)
for match in reversed(list(matches)):
i, j = match.span()
view[i:j] = '(%s)' % '|'.join([m.strip() for m in match.groups()])
view = view.replace('*', '.*')
view = view.replace('?', '.')
view += '$'
try:
_match_pattern = re.compile(str(view)).match
except re.error:
msg = 'Invalid file pattern: {}'.format(filepat)
raise FileSeqException(msg)
if seq.padding() and strictPadding:
_filter_padding = functools.partial(cls._filterByPaddingNum, num=seq.zfill())
# Get just the immediate files under the dir.
# Avoids testing the os.listdir() for files as
# a second step.
ret = next(os.walk(dirpath), None)
files = ret[-1] if ret else []
# collapse some generators to get us the files that match our regex
if not include_hidden:
files = ifilter(_not_hidden, files)
# Filter by files that match the provided file pattern
if _match_pattern:
files = ifilter(_match_pattern, files)
# Filter by files that match the frame padding in the file pattern
if _filter_padding:
# returns a generator
files = _filter_padding(files)
# Ensure our dirpath ends with a path separator, so
# that we can control which sep is used during the
# os.path.join
sep = utils._getPathSep(dirpath)
if not dirpath.endswith(sep):
dirpath += sep
files = (_join(dirpath, f) for f in files)
files = list(files)
seqs = list(FileSequence.yield_sequences_in_list(files))
if _filter_padding and seq:
pad = cls.conformPadding(seq.padding())
# strict padding should preserve the original padding
# characters in the found sequences.
for s in seqs:
s.setPadding(pad)
return seqs | python | def findSequencesOnDisk(cls, pattern, include_hidden=False, strictPadding=False):
"""
Yield the sequences found in the given directory.
Examples:
>>> findSequencesOnDisk('/path/to/files')
The `pattern` can also specify glob-like shell wildcards including the following:
* ``?`` - 1 wildcard character
* ``*`` - 1 or more wildcard character
* ``{foo,bar}`` - either 'foo' or 'bar'
Exact frame ranges are not considered, and padding characters are converted to
wildcards (``#`` or ``@``)
Examples:
>>> findSequencesOnDisk('/path/to/files/image_stereo_{left,right}.#.jpg')
>>> findSequencesOnDisk('/path/to/files/imag?_*_{left,right}.@@@.jpg', strictPadding=True)
Args:
pattern (str): directory to scan, or pattern to filter in directory
include_hidden (bool): if true, show .hidden files as well
strictPadding (bool): if True, ignore files with padding length different from pattern
Returns:
list:
"""
# reserve some functions we're going to need quick access to
_not_hidden = lambda f: not f.startswith('.')
_match_pattern = None
_filter_padding = None
_join = os.path.join
seq = None
dirpath = pattern
# Support the pattern defining a filter for the files
# in the existing directory
if not os.path.isdir(pattern):
dirpath, filepat = os.path.split(pattern)
if not os.path.isdir(dirpath):
return []
# Start building a regex for filtering files
seq = cls(filepat)
patt = seq.basename().replace('.', r'\.')
if seq.padding():
patt += '\d+'
if seq.extension():
patt += seq.extension()
# Convert braces groups into regex capture groups
view = bytearray(patt)
matches = re.finditer(r'{(.*?)(?:,(.*?))*}', patt)
for match in reversed(list(matches)):
i, j = match.span()
view[i:j] = '(%s)' % '|'.join([m.strip() for m in match.groups()])
view = view.replace('*', '.*')
view = view.replace('?', '.')
view += '$'
try:
_match_pattern = re.compile(str(view)).match
except re.error:
msg = 'Invalid file pattern: {}'.format(filepat)
raise FileSeqException(msg)
if seq.padding() and strictPadding:
_filter_padding = functools.partial(cls._filterByPaddingNum, num=seq.zfill())
# Get just the immediate files under the dir.
# Avoids testing the os.listdir() for files as
# a second step.
ret = next(os.walk(dirpath), None)
files = ret[-1] if ret else []
# collapse some generators to get us the files that match our regex
if not include_hidden:
files = ifilter(_not_hidden, files)
# Filter by files that match the provided file pattern
if _match_pattern:
files = ifilter(_match_pattern, files)
# Filter by files that match the frame padding in the file pattern
if _filter_padding:
# returns a generator
files = _filter_padding(files)
# Ensure our dirpath ends with a path separator, so
# that we can control which sep is used during the
# os.path.join
sep = utils._getPathSep(dirpath)
if not dirpath.endswith(sep):
dirpath += sep
files = (_join(dirpath, f) for f in files)
files = list(files)
seqs = list(FileSequence.yield_sequences_in_list(files))
if _filter_padding and seq:
pad = cls.conformPadding(seq.padding())
# strict padding should preserve the original padding
# characters in the found sequences.
for s in seqs:
s.setPadding(pad)
return seqs | [
"def",
"findSequencesOnDisk",
"(",
"cls",
",",
"pattern",
",",
"include_hidden",
"=",
"False",
",",
"strictPadding",
"=",
"False",
")",
":",
"# reserve some functions we're going to need quick access to",
"_not_hidden",
"=",
"lambda",
"f",
":",
"not",
"f",
".",
"sta... | Yield the sequences found in the given directory.
Examples:
>>> findSequencesOnDisk('/path/to/files')
The `pattern` can also specify glob-like shell wildcards including the following:
* ``?`` - 1 wildcard character
* ``*`` - 1 or more wildcard character
* ``{foo,bar}`` - either 'foo' or 'bar'
Exact frame ranges are not considered, and padding characters are converted to
wildcards (``#`` or ``@``)
Examples:
>>> findSequencesOnDisk('/path/to/files/image_stereo_{left,right}.#.jpg')
>>> findSequencesOnDisk('/path/to/files/imag?_*_{left,right}.@@@.jpg', strictPadding=True)
Args:
pattern (str): directory to scan, or pattern to filter in directory
include_hidden (bool): if true, show .hidden files as well
strictPadding (bool): if True, ignore files with padding length different from pattern
Returns:
list: | [
"Yield",
"the",
"sequences",
"found",
"in",
"the",
"given",
"directory",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L525-L633 | train | 213,558 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence.findSequenceOnDisk | def findSequenceOnDisk(cls, pattern, strictPadding=False):
"""
Search for a specific sequence on disk.
The padding characters used in the `pattern` are used to filter the
frame values of the files on disk (if `strictPadding` is True).
Examples:
Find sequence matching basename and extension, and a wildcard for
any frame.
returns bar.1.exr bar.10.exr, bar.100.exr, bar.1000.exr, inclusive
>>> findSequenceOnDisk("seq/bar@@@@.exr")
Find exactly 4-padded sequence, i.e. seq/bar1-100#.exr
returns only frames bar1000.exr through bar9999.exr
>>> findSequenceOnDisk("seq/bar#.exr", strictPadding=True)
Args:
pattern (str): the sequence pattern being searched for
strictPadding (bool): if True, ignore files with padding length different from `pattern`
Returns:
str:
Raises:
:class:`.FileSeqException`: if no sequence is found on disk
"""
seq = cls(pattern)
if seq.frameRange() == '' and seq.padding() == '':
if os.path.isfile(pattern):
return seq
patt = seq.format('{dirname}{basename}*{extension}')
ext = seq.extension()
basename = seq.basename()
pad = seq.padding()
globbed = iglob(patt)
if pad and strictPadding:
globbed = cls._filterByPaddingNum(globbed, seq.zfill())
pad = cls.conformPadding(pad)
matches = cls.yield_sequences_in_list(globbed)
for match in matches:
if match.basename() == basename and match.extension() == ext:
if pad and strictPadding:
match.setPadding(pad)
return match
msg = 'no sequence found on disk matching {0}'
raise FileSeqException(msg.format(pattern)) | python | def findSequenceOnDisk(cls, pattern, strictPadding=False):
"""
Search for a specific sequence on disk.
The padding characters used in the `pattern` are used to filter the
frame values of the files on disk (if `strictPadding` is True).
Examples:
Find sequence matching basename and extension, and a wildcard for
any frame.
returns bar.1.exr bar.10.exr, bar.100.exr, bar.1000.exr, inclusive
>>> findSequenceOnDisk("seq/bar@@@@.exr")
Find exactly 4-padded sequence, i.e. seq/bar1-100#.exr
returns only frames bar1000.exr through bar9999.exr
>>> findSequenceOnDisk("seq/bar#.exr", strictPadding=True)
Args:
pattern (str): the sequence pattern being searched for
strictPadding (bool): if True, ignore files with padding length different from `pattern`
Returns:
str:
Raises:
:class:`.FileSeqException`: if no sequence is found on disk
"""
seq = cls(pattern)
if seq.frameRange() == '' and seq.padding() == '':
if os.path.isfile(pattern):
return seq
patt = seq.format('{dirname}{basename}*{extension}')
ext = seq.extension()
basename = seq.basename()
pad = seq.padding()
globbed = iglob(patt)
if pad and strictPadding:
globbed = cls._filterByPaddingNum(globbed, seq.zfill())
pad = cls.conformPadding(pad)
matches = cls.yield_sequences_in_list(globbed)
for match in matches:
if match.basename() == basename and match.extension() == ext:
if pad and strictPadding:
match.setPadding(pad)
return match
msg = 'no sequence found on disk matching {0}'
raise FileSeqException(msg.format(pattern)) | [
"def",
"findSequenceOnDisk",
"(",
"cls",
",",
"pattern",
",",
"strictPadding",
"=",
"False",
")",
":",
"seq",
"=",
"cls",
"(",
"pattern",
")",
"if",
"seq",
".",
"frameRange",
"(",
")",
"==",
"''",
"and",
"seq",
".",
"padding",
"(",
")",
"==",
"''",
... | Search for a specific sequence on disk.
The padding characters used in the `pattern` are used to filter the
frame values of the files on disk (if `strictPadding` is True).
Examples:
Find sequence matching basename and extension, and a wildcard for
any frame.
returns bar.1.exr bar.10.exr, bar.100.exr, bar.1000.exr, inclusive
>>> findSequenceOnDisk("seq/bar@@@@.exr")
Find exactly 4-padded sequence, i.e. seq/bar1-100#.exr
returns only frames bar1000.exr through bar9999.exr
>>> findSequenceOnDisk("seq/bar#.exr", strictPadding=True)
Args:
pattern (str): the sequence pattern being searched for
strictPadding (bool): if True, ignore files with padding length different from `pattern`
Returns:
str:
Raises:
:class:`.FileSeqException`: if no sequence is found on disk | [
"Search",
"for",
"a",
"specific",
"sequence",
"on",
"disk",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L636-L690 | train | 213,559 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence._filterByPaddingNum | def _filterByPaddingNum(cls, iterable, num):
"""
Yield only path elements from iterable which have a frame
padding that matches the given target padding number
Args:
iterable (collections.Iterable):
num (int):
Yields:
str:
"""
_check = DISK_RE.match
for item in iterable:
# Add a filter for paths that don't match the frame
# padding of a given number
matches = _check(item)
if not matches:
if num <= 0:
# Not a sequence pattern, but we were asked
# to match on a zero padding
yield item
continue
frame = matches.group(3) or ''
if not frame:
if num <= 0:
# No frame value was parsed, but we were asked
# to match on a zero padding
yield item
continue
# We have a frame number
if frame[0] == '0' or frame[:2] == '-0':
if len(frame) == num:
# A frame leading with '0' is explicitly
# padded and can only be a match if its exactly
# the target padding number
yield item
continue
if len(frame) >= num:
# A frame that does not lead with '0' can match
# a padding width >= to the target padding number
yield item
continue | python | def _filterByPaddingNum(cls, iterable, num):
"""
Yield only path elements from iterable which have a frame
padding that matches the given target padding number
Args:
iterable (collections.Iterable):
num (int):
Yields:
str:
"""
_check = DISK_RE.match
for item in iterable:
# Add a filter for paths that don't match the frame
# padding of a given number
matches = _check(item)
if not matches:
if num <= 0:
# Not a sequence pattern, but we were asked
# to match on a zero padding
yield item
continue
frame = matches.group(3) or ''
if not frame:
if num <= 0:
# No frame value was parsed, but we were asked
# to match on a zero padding
yield item
continue
# We have a frame number
if frame[0] == '0' or frame[:2] == '-0':
if len(frame) == num:
# A frame leading with '0' is explicitly
# padded and can only be a match if its exactly
# the target padding number
yield item
continue
if len(frame) >= num:
# A frame that does not lead with '0' can match
# a padding width >= to the target padding number
yield item
continue | [
"def",
"_filterByPaddingNum",
"(",
"cls",
",",
"iterable",
",",
"num",
")",
":",
"_check",
"=",
"DISK_RE",
".",
"match",
"for",
"item",
"in",
"iterable",
":",
"# Add a filter for paths that don't match the frame",
"# padding of a given number",
"matches",
"=",
"_check... | Yield only path elements from iterable which have a frame
padding that matches the given target padding number
Args:
iterable (collections.Iterable):
num (int):
Yields:
str: | [
"Yield",
"only",
"path",
"elements",
"from",
"iterable",
"which",
"have",
"a",
"frame",
"padding",
"that",
"matches",
"the",
"given",
"target",
"padding",
"number"
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L693-L742 | train | 213,560 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence.getPaddingNum | def getPaddingNum(chars):
"""
Given a supported group of padding characters, return the amount of padding.
Args:
chars (str): a supported group of padding characters
Returns:
int:
Raises:
ValueError: if unsupported padding character is detected
"""
match = PRINTF_SYNTAX_PADDING_RE.match(chars)
if match:
return int(match.group(1))
try:
return sum([PAD_MAP[char] for char in chars])
except KeyError:
msg = "Detected an unsupported padding character: \"{}\"."
msg += " Supported padding characters: {} or printf syntax padding"
msg += " %<int>d"
raise ValueError(msg.format(char, str(PAD_MAP.keys()))) | python | def getPaddingNum(chars):
"""
Given a supported group of padding characters, return the amount of padding.
Args:
chars (str): a supported group of padding characters
Returns:
int:
Raises:
ValueError: if unsupported padding character is detected
"""
match = PRINTF_SYNTAX_PADDING_RE.match(chars)
if match:
return int(match.group(1))
try:
return sum([PAD_MAP[char] for char in chars])
except KeyError:
msg = "Detected an unsupported padding character: \"{}\"."
msg += " Supported padding characters: {} or printf syntax padding"
msg += " %<int>d"
raise ValueError(msg.format(char, str(PAD_MAP.keys()))) | [
"def",
"getPaddingNum",
"(",
"chars",
")",
":",
"match",
"=",
"PRINTF_SYNTAX_PADDING_RE",
".",
"match",
"(",
"chars",
")",
"if",
"match",
":",
"return",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"try",
":",
"return",
"sum",
"(",
"[",
"PA... | Given a supported group of padding characters, return the amount of padding.
Args:
chars (str): a supported group of padding characters
Returns:
int:
Raises:
ValueError: if unsupported padding character is detected | [
"Given",
"a",
"supported",
"group",
"of",
"padding",
"characters",
"return",
"the",
"amount",
"of",
"padding",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L763-L786 | train | 213,561 |
sqlboy/fileseq | src/fileseq/filesequence.py | FileSequence.conformPadding | def conformPadding(cls, chars):
"""
Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters
"""
pad = chars
if pad and pad[0] not in PAD_MAP:
pad = cls.getPaddingChars(cls.getPaddingNum(pad))
return pad | python | def conformPadding(cls, chars):
"""
Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters
"""
pad = chars
if pad and pad[0] not in PAD_MAP:
pad = cls.getPaddingChars(cls.getPaddingNum(pad))
return pad | [
"def",
"conformPadding",
"(",
"cls",
",",
"chars",
")",
":",
"pad",
"=",
"chars",
"if",
"pad",
"and",
"pad",
"[",
"0",
"]",
"not",
"in",
"PAD_MAP",
":",
"pad",
"=",
"cls",
".",
"getPaddingChars",
"(",
"cls",
".",
"getPaddingNum",
"(",
"pad",
")",
"... | Ensure alternate input padding formats are conformed
to formats defined in PAD_MAP
If chars is already a format defined in PAD_MAP, then
it is returned unmodified.
Example::
'#' -> '#'
'@@@@' -> '@@@@'
'%04d' -> '#'
Args:
chars (str): input padding chars
Returns:
str: conformed padding chars
Raises:
ValueError: If chars contains invalid padding characters | [
"Ensure",
"alternate",
"input",
"padding",
"formats",
"are",
"conformed",
"to",
"formats",
"defined",
"in",
"PAD_MAP"
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/filesequence.py#L789-L814 | train | 213,562 |
sqlboy/fileseq | src/fileseq/frameset.py | FrameSet._cast_to_frameset | def _cast_to_frameset(cls, other):
"""
Private method to simplify comparison operations.
Args:
other (:class:`FrameSet` or set or frozenset or or iterable): item to be compared
Returns:
:class:`FrameSet`
Raises:
:class:`NotImplemented`: if a comparison is impossible
"""
if isinstance(other, FrameSet):
return other
try:
return FrameSet(other)
except Exception:
return NotImplemented | python | def _cast_to_frameset(cls, other):
"""
Private method to simplify comparison operations.
Args:
other (:class:`FrameSet` or set or frozenset or or iterable): item to be compared
Returns:
:class:`FrameSet`
Raises:
:class:`NotImplemented`: if a comparison is impossible
"""
if isinstance(other, FrameSet):
return other
try:
return FrameSet(other)
except Exception:
return NotImplemented | [
"def",
"_cast_to_frameset",
"(",
"cls",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"FrameSet",
")",
":",
"return",
"other",
"try",
":",
"return",
"FrameSet",
"(",
"other",
")",
"except",
"Exception",
":",
"return",
"NotImplemented"
] | Private method to simplify comparison operations.
Args:
other (:class:`FrameSet` or set or frozenset or or iterable): item to be compared
Returns:
:class:`FrameSet`
Raises:
:class:`NotImplemented`: if a comparison is impossible | [
"Private",
"method",
"to",
"simplify",
"comparison",
"operations",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/frameset.py#L243-L261 | train | 213,563 |
sqlboy/fileseq | src/fileseq/frameset.py | FrameSet.issubset | def issubset(self, other):
"""
Check if the contents of `self` is a subset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items <= other.items | python | def issubset(self, other):
"""
Check if the contents of `self` is a subset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items <= other.items | [
"def",
"issubset",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"self",
".",
"_cast_to_frameset",
"(",
"other",
")",
"if",
"other",
"is",
"NotImplemented",
":",
"return",
"NotImplemented",
"return",
"self",
".",
"items",
"<=",
"other",
".",
"items"
] | Check if the contents of `self` is a subset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet` | [
"Check",
"if",
"the",
"contents",
"of",
"self",
"is",
"a",
"subset",
"of",
"the",
"contents",
"of",
"other",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/frameset.py#L818-L833 | train | 213,564 |
sqlboy/fileseq | src/fileseq/frameset.py | FrameSet.issuperset | def issuperset(self, other):
"""
Check if the contents of `self` is a superset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items >= other.items | python | def issuperset(self, other):
"""
Check if the contents of `self` is a superset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet`
"""
other = self._cast_to_frameset(other)
if other is NotImplemented:
return NotImplemented
return self.items >= other.items | [
"def",
"issuperset",
"(",
"self",
",",
"other",
")",
":",
"other",
"=",
"self",
".",
"_cast_to_frameset",
"(",
"other",
")",
"if",
"other",
"is",
"NotImplemented",
":",
"return",
"NotImplemented",
"return",
"self",
".",
"items",
">=",
"other",
".",
"items"... | Check if the contents of `self` is a superset of the contents of
`other.`
Args:
other (:class:`FrameSet`):
Returns:
bool:
:class:`NotImplemented`: if `other` fails to convert to a :class:`FrameSet` | [
"Check",
"if",
"the",
"contents",
"of",
"self",
"is",
"a",
"superset",
"of",
"the",
"contents",
"of",
"other",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/frameset.py#L835-L850 | train | 213,565 |
sqlboy/fileseq | src/fileseq/frameset.py | FrameSet._maxSizeCheck | def _maxSizeCheck(cls, obj):
"""
Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE
Args:
obj (numbers.Number or collection):
Raises:
:class:`fileseq.exceptions.MaxSizeException`:
"""
fail = False
size = 0
if isinstance(obj, numbers.Number):
if obj > constants.MAX_FRAME_SIZE:
fail = True
size = obj
elif hasattr(obj, '__len__'):
size = len(obj)
fail = size > constants.MAX_FRAME_SIZE
if fail:
raise MaxSizeException('Frame size %s > %s (MAX_FRAME_SIZE)' \
% (size, constants.MAX_FRAME_SIZE)) | python | def _maxSizeCheck(cls, obj):
"""
Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE
Args:
obj (numbers.Number or collection):
Raises:
:class:`fileseq.exceptions.MaxSizeException`:
"""
fail = False
size = 0
if isinstance(obj, numbers.Number):
if obj > constants.MAX_FRAME_SIZE:
fail = True
size = obj
elif hasattr(obj, '__len__'):
size = len(obj)
fail = size > constants.MAX_FRAME_SIZE
if fail:
raise MaxSizeException('Frame size %s > %s (MAX_FRAME_SIZE)' \
% (size, constants.MAX_FRAME_SIZE)) | [
"def",
"_maxSizeCheck",
"(",
"cls",
",",
"obj",
")",
":",
"fail",
"=",
"False",
"size",
"=",
"0",
"if",
"isinstance",
"(",
"obj",
",",
"numbers",
".",
"Number",
")",
":",
"if",
"obj",
">",
"constants",
".",
"MAX_FRAME_SIZE",
":",
"fail",
"=",
"True",... | Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE
Args:
obj (numbers.Number or collection):
Raises:
:class:`fileseq.exceptions.MaxSizeException`: | [
"Raise",
"a",
"MaxSizeException",
"if",
"obj",
"exceeds",
"MAX_FRAME_SIZE"
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/frameset.py#L921-L945 | train | 213,566 |
sqlboy/fileseq | src/fileseq/frameset.py | FrameSet.padFrameRange | def padFrameRange(frange, zfill):
"""
Return the zero-padded version of the frame range string.
Args:
frange (str): a frame range to test
zfill (int):
Returns:
str:
"""
def _do_pad(match):
"""
Substitutes padded for unpadded frames.
"""
result = list(match.groups())
result[1] = pad(result[1], zfill)
if result[4]:
result[4] = pad(result[4], zfill)
return ''.join((i for i in result if i))
return PAD_RE.sub(_do_pad, frange) | python | def padFrameRange(frange, zfill):
"""
Return the zero-padded version of the frame range string.
Args:
frange (str): a frame range to test
zfill (int):
Returns:
str:
"""
def _do_pad(match):
"""
Substitutes padded for unpadded frames.
"""
result = list(match.groups())
result[1] = pad(result[1], zfill)
if result[4]:
result[4] = pad(result[4], zfill)
return ''.join((i for i in result if i))
return PAD_RE.sub(_do_pad, frange) | [
"def",
"padFrameRange",
"(",
"frange",
",",
"zfill",
")",
":",
"def",
"_do_pad",
"(",
"match",
")",
":",
"\"\"\"\n Substitutes padded for unpadded frames.\n \"\"\"",
"result",
"=",
"list",
"(",
"match",
".",
"groups",
"(",
")",
")",
"result",
... | Return the zero-padded version of the frame range string.
Args:
frange (str): a frame range to test
zfill (int):
Returns:
str: | [
"Return",
"the",
"zero",
"-",
"padded",
"version",
"of",
"the",
"frame",
"range",
"string",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/frameset.py#L974-L994 | train | 213,567 |
sqlboy/fileseq | src/fileseq/frameset.py | FrameSet.framesToFrameRanges | def framesToFrameRanges(frames, zfill=0):
"""
Converts a sequence of frames to a series of padded
frame range strings.
Args:
frames (collections.Iterable): sequence of frames to process
zfill (int): width for zero padding
Yields:
str:
"""
_build = FrameSet._build_frange_part
curr_start = None
curr_stride = None
curr_frame = None
last_frame = None
curr_count = 0
for curr_frame in frames:
if curr_start is None:
curr_start = curr_frame
last_frame = curr_frame
curr_count += 1
continue
if curr_stride is None:
curr_stride = abs(curr_frame-curr_start)
new_stride = abs(curr_frame-last_frame)
if curr_stride == new_stride:
last_frame = curr_frame
curr_count += 1
elif curr_count == 2 and curr_stride != 1:
yield _build(curr_start, curr_start, None, zfill)
curr_start = last_frame
curr_stride = new_stride
last_frame = curr_frame
else:
yield _build(curr_start, last_frame, curr_stride, zfill)
curr_stride = None
curr_start = curr_frame
last_frame = curr_frame
curr_count = 1
if curr_count == 2 and curr_stride != 1:
yield _build(curr_start, curr_start, None, zfill)
yield _build(curr_frame, curr_frame, None, zfill)
else:
yield _build(curr_start, curr_frame, curr_stride, zfill) | python | def framesToFrameRanges(frames, zfill=0):
"""
Converts a sequence of frames to a series of padded
frame range strings.
Args:
frames (collections.Iterable): sequence of frames to process
zfill (int): width for zero padding
Yields:
str:
"""
_build = FrameSet._build_frange_part
curr_start = None
curr_stride = None
curr_frame = None
last_frame = None
curr_count = 0
for curr_frame in frames:
if curr_start is None:
curr_start = curr_frame
last_frame = curr_frame
curr_count += 1
continue
if curr_stride is None:
curr_stride = abs(curr_frame-curr_start)
new_stride = abs(curr_frame-last_frame)
if curr_stride == new_stride:
last_frame = curr_frame
curr_count += 1
elif curr_count == 2 and curr_stride != 1:
yield _build(curr_start, curr_start, None, zfill)
curr_start = last_frame
curr_stride = new_stride
last_frame = curr_frame
else:
yield _build(curr_start, last_frame, curr_stride, zfill)
curr_stride = None
curr_start = curr_frame
last_frame = curr_frame
curr_count = 1
if curr_count == 2 and curr_stride != 1:
yield _build(curr_start, curr_start, None, zfill)
yield _build(curr_frame, curr_frame, None, zfill)
else:
yield _build(curr_start, curr_frame, curr_stride, zfill) | [
"def",
"framesToFrameRanges",
"(",
"frames",
",",
"zfill",
"=",
"0",
")",
":",
"_build",
"=",
"FrameSet",
".",
"_build_frange_part",
"curr_start",
"=",
"None",
"curr_stride",
"=",
"None",
"curr_frame",
"=",
"None",
"last_frame",
"=",
"None",
"curr_count",
"=",... | Converts a sequence of frames to a series of padded
frame range strings.
Args:
frames (collections.Iterable): sequence of frames to process
zfill (int): width for zero padding
Yields:
str: | [
"Converts",
"a",
"sequence",
"of",
"frames",
"to",
"a",
"series",
"of",
"padded",
"frame",
"range",
"strings",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/frameset.py#L1058-L1103 | train | 213,568 |
sqlboy/fileseq | src/fileseq/frameset.py | FrameSet.framesToFrameRange | def framesToFrameRange(frames, sort=True, zfill=0, compress=False):
"""
Converts an iterator of frames into a
frame range string.
Args:
frames (collections.Iterable): sequence of frames to process
sort (bool): sort the sequence before processing
zfill (int): width for zero padding
compress (bool): remove any duplicates before processing
Returns:
str:
"""
if compress:
frames = unique(set(), frames)
frames = list(frames)
if not frames:
return ''
if len(frames) == 1:
return pad(frames[0], zfill)
if sort:
frames.sort()
return ','.join(FrameSet.framesToFrameRanges(frames, zfill)) | python | def framesToFrameRange(frames, sort=True, zfill=0, compress=False):
"""
Converts an iterator of frames into a
frame range string.
Args:
frames (collections.Iterable): sequence of frames to process
sort (bool): sort the sequence before processing
zfill (int): width for zero padding
compress (bool): remove any duplicates before processing
Returns:
str:
"""
if compress:
frames = unique(set(), frames)
frames = list(frames)
if not frames:
return ''
if len(frames) == 1:
return pad(frames[0], zfill)
if sort:
frames.sort()
return ','.join(FrameSet.framesToFrameRanges(frames, zfill)) | [
"def",
"framesToFrameRange",
"(",
"frames",
",",
"sort",
"=",
"True",
",",
"zfill",
"=",
"0",
",",
"compress",
"=",
"False",
")",
":",
"if",
"compress",
":",
"frames",
"=",
"unique",
"(",
"set",
"(",
")",
",",
"frames",
")",
"frames",
"=",
"list",
... | Converts an iterator of frames into a
frame range string.
Args:
frames (collections.Iterable): sequence of frames to process
sort (bool): sort the sequence before processing
zfill (int): width for zero padding
compress (bool): remove any duplicates before processing
Returns:
str: | [
"Converts",
"an",
"iterator",
"of",
"frames",
"into",
"a",
"frame",
"range",
"string",
"."
] | f26c3c3c383134ce27d5dfe37793e1ebe88e69ad | https://github.com/sqlboy/fileseq/blob/f26c3c3c383134ce27d5dfe37793e1ebe88e69ad/src/fileseq/frameset.py#L1106-L1129 | train | 213,569 |
litl/rauth | rauth/session.py | OAuth1Session._parse_optional_params | def _parse_optional_params(self, oauth_params, req_kwargs):
'''
Parses and sets optional OAuth parameters on a request.
:param oauth_param: The OAuth parameter to parse.
:type oauth_param: str
:param req_kwargs: The keyworded arguments passed to the request
method.
:type req_kwargs: dict
'''
params = req_kwargs.get('params', {})
data = req_kwargs.get('data') or {}
for oauth_param in OPTIONAL_OAUTH_PARAMS:
if oauth_param in params:
oauth_params[oauth_param] = params.pop(oauth_param)
if oauth_param in data:
oauth_params[oauth_param] = data.pop(oauth_param)
if params:
req_kwargs['params'] = params
if data:
req_kwargs['data'] = data | python | def _parse_optional_params(self, oauth_params, req_kwargs):
'''
Parses and sets optional OAuth parameters on a request.
:param oauth_param: The OAuth parameter to parse.
:type oauth_param: str
:param req_kwargs: The keyworded arguments passed to the request
method.
:type req_kwargs: dict
'''
params = req_kwargs.get('params', {})
data = req_kwargs.get('data') or {}
for oauth_param in OPTIONAL_OAUTH_PARAMS:
if oauth_param in params:
oauth_params[oauth_param] = params.pop(oauth_param)
if oauth_param in data:
oauth_params[oauth_param] = data.pop(oauth_param)
if params:
req_kwargs['params'] = params
if data:
req_kwargs['data'] = data | [
"def",
"_parse_optional_params",
"(",
"self",
",",
"oauth_params",
",",
"req_kwargs",
")",
":",
"params",
"=",
"req_kwargs",
".",
"get",
"(",
"'params'",
",",
"{",
"}",
")",
"data",
"=",
"req_kwargs",
".",
"get",
"(",
"'data'",
")",
"or",
"{",
"}",
"fo... | Parses and sets optional OAuth parameters on a request.
:param oauth_param: The OAuth parameter to parse.
:type oauth_param: str
:param req_kwargs: The keyworded arguments passed to the request
method.
:type req_kwargs: dict | [
"Parses",
"and",
"sets",
"optional",
"OAuth",
"parameters",
"on",
"a",
"request",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/session.py#L212-L235 | train | 213,570 |
litl/rauth | rauth/session.py | OAuth1Session._get_oauth_params | def _get_oauth_params(self, req_kwargs):
'''Prepares OAuth params for signing.'''
oauth_params = {}
oauth_params['oauth_consumer_key'] = self.consumer_key
oauth_params['oauth_nonce'] = sha1(
str(random()).encode('ascii')).hexdigest()
oauth_params['oauth_signature_method'] = self.signature.NAME
oauth_params['oauth_timestamp'] = int(time())
if self.access_token is not None:
oauth_params['oauth_token'] = self.access_token
oauth_params['oauth_version'] = self.VERSION
self._parse_optional_params(oauth_params, req_kwargs)
return oauth_params | python | def _get_oauth_params(self, req_kwargs):
'''Prepares OAuth params for signing.'''
oauth_params = {}
oauth_params['oauth_consumer_key'] = self.consumer_key
oauth_params['oauth_nonce'] = sha1(
str(random()).encode('ascii')).hexdigest()
oauth_params['oauth_signature_method'] = self.signature.NAME
oauth_params['oauth_timestamp'] = int(time())
if self.access_token is not None:
oauth_params['oauth_token'] = self.access_token
oauth_params['oauth_version'] = self.VERSION
self._parse_optional_params(oauth_params, req_kwargs)
return oauth_params | [
"def",
"_get_oauth_params",
"(",
"self",
",",
"req_kwargs",
")",
":",
"oauth_params",
"=",
"{",
"}",
"oauth_params",
"[",
"'oauth_consumer_key'",
"]",
"=",
"self",
".",
"consumer_key",
"oauth_params",
"[",
"'oauth_nonce'",
"]",
"=",
"sha1",
"(",
"str",
"(",
... | Prepares OAuth params for signing. | [
"Prepares",
"OAuth",
"params",
"for",
"signing",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/session.py#L237-L254 | train | 213,571 |
litl/rauth | rauth/session.py | OflySession.sign | def sign(url, app_id, app_secret, hash_meth='sha1', **params):
'''
A signature method which generates the necessary Ofly parameters.
:param app_id: The oFlyAppId, i.e. "application ID".
:type app_id: str
:param app_secret: The oFlyAppSecret, i.e. "shared secret".
:type app_secret: str
:param hash_meth: The hash method to use for signing, defaults to
"sha1".
:type hash_meth: str
:param \*\*params: Additional parameters.
:type \*\*\params: dict
'''
hash_meth_str = hash_meth
if hash_meth == 'sha1':
hash_meth = sha1
elif hash_meth == 'md5':
hash_meth = md5
else:
raise TypeError('hash_meth must be one of "sha1", "md5"')
now = datetime.utcnow()
milliseconds = now.microsecond // 1000
time_format = '%Y-%m-%dT%H:%M:%S.{0}Z'.format(milliseconds)
ofly_params = {'oflyAppId': app_id,
'oflyHashMeth': hash_meth_str.upper(),
'oflyTimestamp': now.strftime(time_format)}
url_path = urlsplit(url).path
signature_base_string = app_secret + url_path + '?'
if len(params):
signature_base_string += get_sorted_params(params) + '&'
signature_base_string += get_sorted_params(ofly_params)
if not isinstance(signature_base_string, bytes):
signature_base_string = signature_base_string.encode('utf-8')
ofly_params['oflyApiSig'] = \
hash_meth(signature_base_string).hexdigest()
all_params = dict(tuple(ofly_params.items()) + tuple(params.items()))
return get_sorted_params(all_params) | python | def sign(url, app_id, app_secret, hash_meth='sha1', **params):
'''
A signature method which generates the necessary Ofly parameters.
:param app_id: The oFlyAppId, i.e. "application ID".
:type app_id: str
:param app_secret: The oFlyAppSecret, i.e. "shared secret".
:type app_secret: str
:param hash_meth: The hash method to use for signing, defaults to
"sha1".
:type hash_meth: str
:param \*\*params: Additional parameters.
:type \*\*\params: dict
'''
hash_meth_str = hash_meth
if hash_meth == 'sha1':
hash_meth = sha1
elif hash_meth == 'md5':
hash_meth = md5
else:
raise TypeError('hash_meth must be one of "sha1", "md5"')
now = datetime.utcnow()
milliseconds = now.microsecond // 1000
time_format = '%Y-%m-%dT%H:%M:%S.{0}Z'.format(milliseconds)
ofly_params = {'oflyAppId': app_id,
'oflyHashMeth': hash_meth_str.upper(),
'oflyTimestamp': now.strftime(time_format)}
url_path = urlsplit(url).path
signature_base_string = app_secret + url_path + '?'
if len(params):
signature_base_string += get_sorted_params(params) + '&'
signature_base_string += get_sorted_params(ofly_params)
if not isinstance(signature_base_string, bytes):
signature_base_string = signature_base_string.encode('utf-8')
ofly_params['oflyApiSig'] = \
hash_meth(signature_base_string).hexdigest()
all_params = dict(tuple(ofly_params.items()) + tuple(params.items()))
return get_sorted_params(all_params) | [
"def",
"sign",
"(",
"url",
",",
"app_id",
",",
"app_secret",
",",
"hash_meth",
"=",
"'sha1'",
",",
"*",
"*",
"params",
")",
":",
"hash_meth_str",
"=",
"hash_meth",
"if",
"hash_meth",
"==",
"'sha1'",
":",
"hash_meth",
"=",
"sha1",
"elif",
"hash_meth",
"==... | A signature method which generates the necessary Ofly parameters.
:param app_id: The oFlyAppId, i.e. "application ID".
:type app_id: str
:param app_secret: The oFlyAppSecret, i.e. "shared secret".
:type app_secret: str
:param hash_meth: The hash method to use for signing, defaults to
"sha1".
:type hash_meth: str
:param \*\*params: Additional parameters.
:type \*\*\params: dict | [
"A",
"signature",
"method",
"which",
"generates",
"the",
"necessary",
"Ofly",
"parameters",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/session.py#L470-L515 | train | 213,572 |
litl/rauth | rauth/utils.py | OAuth1Auth._get_auth_header | def _get_auth_header(self):
''' Constructs and returns an authentication header. '''
realm = 'realm="{realm}"'.format(realm=self.realm)
params = ['{k}="{v}"'.format(k=k, v=quote(str(v), safe=''))
for k, v in self.oauth_params.items()]
return 'OAuth ' + ','.join([realm] + params) | python | def _get_auth_header(self):
''' Constructs and returns an authentication header. '''
realm = 'realm="{realm}"'.format(realm=self.realm)
params = ['{k}="{v}"'.format(k=k, v=quote(str(v), safe=''))
for k, v in self.oauth_params.items()]
return 'OAuth ' + ','.join([realm] + params) | [
"def",
"_get_auth_header",
"(",
"self",
")",
":",
"realm",
"=",
"'realm=\"{realm}\"'",
".",
"format",
"(",
"realm",
"=",
"self",
".",
"realm",
")",
"params",
"=",
"[",
"'{k}=\"{v}\"'",
".",
"format",
"(",
"k",
"=",
"k",
",",
"v",
"=",
"quote",
"(",
"... | Constructs and returns an authentication header. | [
"Constructs",
"and",
"returns",
"an",
"authentication",
"header",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/utils.py#L93-L98 | train | 213,573 |
litl/rauth | rauth/oauth.py | SignatureMethod._remove_qs | def _remove_qs(self, url):
'''
Removes a query string from a URL before signing.
:param url: The URL to strip.
:type url: str
'''
scheme, netloc, path, query, fragment = urlsplit(url)
return urlunsplit((scheme, netloc, path, '', fragment)) | python | def _remove_qs(self, url):
'''
Removes a query string from a URL before signing.
:param url: The URL to strip.
:type url: str
'''
scheme, netloc, path, query, fragment = urlsplit(url)
return urlunsplit((scheme, netloc, path, '', fragment)) | [
"def",
"_remove_qs",
"(",
"self",
",",
"url",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
"=",
"urlsplit",
"(",
"url",
")",
"return",
"urlunsplit",
"(",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"''",
",",
"... | Removes a query string from a URL before signing.
:param url: The URL to strip.
:type url: str | [
"Removes",
"a",
"query",
"string",
"from",
"a",
"URL",
"before",
"signing",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/oauth.py#L37-L46 | train | 213,574 |
litl/rauth | rauth/oauth.py | SignatureMethod._normalize_request_parameters | def _normalize_request_parameters(self, oauth_params, req_kwargs):
'''
This process normalizes the request parameters as detailed in the OAuth
1.0 spec.
Additionally we apply a `Content-Type` header to the request of the
`FORM_URLENCODE` type if the `Content-Type` was previously set, i.e. if
this is a `POST` or `PUT` request. This ensures the correct header is
set as per spec.
Finally we sort the parameters in preparation for signing and return
a URL encoded string of all normalized parameters.
:param oauth_params: OAuth params to sign with.
:type oauth_params: dict
:param req_kwargs: Request kwargs to normalize.
:type req_kwargs: dict
'''
normalized = []
params = req_kwargs.get('params', {})
data = req_kwargs.get('data', {})
headers = req_kwargs.get('headers', {})
# process request parameters
for k, v in params.items():
if v is not None:
normalized += [(k, v)]
# process request data
if 'Content-Type' in headers and \
headers['Content-Type'] == FORM_URLENCODED:
for k, v in data.items():
normalized += [(k, v)]
# extract values from our list of tuples
all_normalized = []
for t in normalized:
k, v = t
if is_basestring(v) and not isinstance(v, bytes):
v = v.encode('utf-8')
all_normalized += [(k, v)]
# add in the params from oauth_params for signing
for k, v in oauth_params.items():
if (k, v) in all_normalized: # pragma: no cover
continue
all_normalized += [(k, v)]
# sort the params as per the OAuth 1.0/a spec
all_normalized.sort()
# finally encode the params as a string
return urlencode(all_normalized, True)\
.replace('+', '%20')\
.replace('%7E', '~') | python | def _normalize_request_parameters(self, oauth_params, req_kwargs):
'''
This process normalizes the request parameters as detailed in the OAuth
1.0 spec.
Additionally we apply a `Content-Type` header to the request of the
`FORM_URLENCODE` type if the `Content-Type` was previously set, i.e. if
this is a `POST` or `PUT` request. This ensures the correct header is
set as per spec.
Finally we sort the parameters in preparation for signing and return
a URL encoded string of all normalized parameters.
:param oauth_params: OAuth params to sign with.
:type oauth_params: dict
:param req_kwargs: Request kwargs to normalize.
:type req_kwargs: dict
'''
normalized = []
params = req_kwargs.get('params', {})
data = req_kwargs.get('data', {})
headers = req_kwargs.get('headers', {})
# process request parameters
for k, v in params.items():
if v is not None:
normalized += [(k, v)]
# process request data
if 'Content-Type' in headers and \
headers['Content-Type'] == FORM_URLENCODED:
for k, v in data.items():
normalized += [(k, v)]
# extract values from our list of tuples
all_normalized = []
for t in normalized:
k, v = t
if is_basestring(v) and not isinstance(v, bytes):
v = v.encode('utf-8')
all_normalized += [(k, v)]
# add in the params from oauth_params for signing
for k, v in oauth_params.items():
if (k, v) in all_normalized: # pragma: no cover
continue
all_normalized += [(k, v)]
# sort the params as per the OAuth 1.0/a spec
all_normalized.sort()
# finally encode the params as a string
return urlencode(all_normalized, True)\
.replace('+', '%20')\
.replace('%7E', '~') | [
"def",
"_normalize_request_parameters",
"(",
"self",
",",
"oauth_params",
",",
"req_kwargs",
")",
":",
"normalized",
"=",
"[",
"]",
"params",
"=",
"req_kwargs",
".",
"get",
"(",
"'params'",
",",
"{",
"}",
")",
"data",
"=",
"req_kwargs",
".",
"get",
"(",
... | This process normalizes the request parameters as detailed in the OAuth
1.0 spec.
Additionally we apply a `Content-Type` header to the request of the
`FORM_URLENCODE` type if the `Content-Type` was previously set, i.e. if
this is a `POST` or `PUT` request. This ensures the correct header is
set as per spec.
Finally we sort the parameters in preparation for signing and return
a URL encoded string of all normalized parameters.
:param oauth_params: OAuth params to sign with.
:type oauth_params: dict
:param req_kwargs: Request kwargs to normalize.
:type req_kwargs: dict | [
"This",
"process",
"normalizes",
"the",
"request",
"parameters",
"as",
"detailed",
"in",
"the",
"OAuth",
"1",
".",
"0",
"spec",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/oauth.py#L48-L103 | train | 213,575 |
litl/rauth | rauth/oauth.py | PlaintextSignature.sign | def sign(self, consumer_secret, access_token_secret, method, url,
oauth_params, req_kwargs):
'''Sign request using PLAINTEXT method.
:param consumer_secret: Consumer secret.
:type consumer_secret: str
:param access_token_secret: Access token secret (optional).
:type access_token_secret: str
:param method: Unused
:type method: str
:param url: Unused
:type url: str
:param oauth_params: Unused
:type oauth_params: dict
:param req_kwargs: Unused
:type req_kwargs: dict
'''
key = self._escape(consumer_secret) + b'&'
if access_token_secret:
key += self._escape(access_token_secret)
return key.decode() | python | def sign(self, consumer_secret, access_token_secret, method, url,
oauth_params, req_kwargs):
'''Sign request using PLAINTEXT method.
:param consumer_secret: Consumer secret.
:type consumer_secret: str
:param access_token_secret: Access token secret (optional).
:type access_token_secret: str
:param method: Unused
:type method: str
:param url: Unused
:type url: str
:param oauth_params: Unused
:type oauth_params: dict
:param req_kwargs: Unused
:type req_kwargs: dict
'''
key = self._escape(consumer_secret) + b'&'
if access_token_secret:
key += self._escape(access_token_secret)
return key.decode() | [
"def",
"sign",
"(",
"self",
",",
"consumer_secret",
",",
"access_token_secret",
",",
"method",
",",
"url",
",",
"oauth_params",
",",
"req_kwargs",
")",
":",
"key",
"=",
"self",
".",
"_escape",
"(",
"consumer_secret",
")",
"+",
"b'&'",
"if",
"access_token_sec... | Sign request using PLAINTEXT method.
:param consumer_secret: Consumer secret.
:type consumer_secret: str
:param access_token_secret: Access token secret (optional).
:type access_token_secret: str
:param method: Unused
:type method: str
:param url: Unused
:type url: str
:param oauth_params: Unused
:type oauth_params: dict
:param req_kwargs: Unused
:type req_kwargs: dict | [
"Sign",
"request",
"using",
"PLAINTEXT",
"method",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/oauth.py#L228-L248 | train | 213,576 |
litl/rauth | rauth/service.py | OAuth1Service.get_request_token | def get_request_token(self,
method='GET',
decoder=parse_utf8_qsl,
key_token='oauth_token',
key_token_secret='oauth_token_secret',
**kwargs):
'''
Return a request token pair.
:param method: A string representation of the HTTP method to be used,
defaults to `GET`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key_token: The key the access token will be decoded by, defaults
to 'oauth_token'.
:type string:
:param key_token_secret: The key the access token will be decoded by,
defaults to 'oauth_token_secret'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
r = self.get_raw_request_token(method=method, **kwargs)
request_token, request_token_secret = \
process_token_request(r, decoder, key_token, key_token_secret)
return request_token, request_token_secret | python | def get_request_token(self,
method='GET',
decoder=parse_utf8_qsl,
key_token='oauth_token',
key_token_secret='oauth_token_secret',
**kwargs):
'''
Return a request token pair.
:param method: A string representation of the HTTP method to be used,
defaults to `GET`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key_token: The key the access token will be decoded by, defaults
to 'oauth_token'.
:type string:
:param key_token_secret: The key the access token will be decoded by,
defaults to 'oauth_token_secret'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
r = self.get_raw_request_token(method=method, **kwargs)
request_token, request_token_secret = \
process_token_request(r, decoder, key_token, key_token_secret)
return request_token, request_token_secret | [
"def",
"get_request_token",
"(",
"self",
",",
"method",
"=",
"'GET'",
",",
"decoder",
"=",
"parse_utf8_qsl",
",",
"key_token",
"=",
"'oauth_token'",
",",
"key_token_secret",
"=",
"'oauth_token_secret'",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"self",
".... | Return a request token pair.
:param method: A string representation of the HTTP method to be used,
defaults to `GET`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key_token: The key the access token will be decoded by, defaults
to 'oauth_token'.
:type string:
:param key_token_secret: The key the access token will be decoded by,
defaults to 'oauth_token_secret'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict | [
"Return",
"a",
"request",
"token",
"pair",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/service.py#L218-L245 | train | 213,577 |
litl/rauth | rauth/service.py | OAuth1Service.get_access_token | def get_access_token(self,
request_token,
request_token_secret,
method='GET',
decoder=parse_utf8_qsl,
key_token='oauth_token',
key_token_secret='oauth_token_secret',
**kwargs):
'''
Returns an access token pair.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key_token: The key the access token will be decoded by, defaults
to 'oauth_token'.
:type string:
:param key_token_secret: The key the access token will be decoded by,
defaults to 'oauth_token_secret'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
r = self.get_raw_access_token(request_token,
request_token_secret,
method=method,
**kwargs)
access_token, access_token_secret = \
process_token_request(r, decoder, key_token, key_token_secret)
return access_token, access_token_secret | python | def get_access_token(self,
request_token,
request_token_secret,
method='GET',
decoder=parse_utf8_qsl,
key_token='oauth_token',
key_token_secret='oauth_token_secret',
**kwargs):
'''
Returns an access token pair.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key_token: The key the access token will be decoded by, defaults
to 'oauth_token'.
:type string:
:param key_token_secret: The key the access token will be decoded by,
defaults to 'oauth_token_secret'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
r = self.get_raw_access_token(request_token,
request_token_secret,
method=method,
**kwargs)
access_token, access_token_secret = \
process_token_request(r, decoder, key_token, key_token_secret)
return access_token, access_token_secret | [
"def",
"get_access_token",
"(",
"self",
",",
"request_token",
",",
"request_token_secret",
",",
"method",
"=",
"'GET'",
",",
"decoder",
"=",
"parse_utf8_qsl",
",",
"key_token",
"=",
"'oauth_token'",
",",
"key_token_secret",
"=",
"'oauth_token_secret'",
",",
"*",
"... | Returns an access token pair.
:param request_token: The request token as returned by
:meth:`get_request_token`.
:type request_token: str
:param request_token_secret: The request token secret as returned by
:meth:`get_request_token`.
:type request_token_secret: str
:param method: A string representation of the HTTP method to be
used, defaults to `GET`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key_token: The key the access token will be decoded by, defaults
to 'oauth_token'.
:type string:
:param key_token_secret: The key the access token will be decoded by,
defaults to 'oauth_token_secret'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict | [
"Returns",
"an",
"access",
"token",
"pair",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/service.py#L294-L333 | train | 213,578 |
litl/rauth | rauth/service.py | OAuth2Service.get_access_token | def get_access_token(self,
method='POST',
decoder=parse_utf8_qsl,
key='access_token',
**kwargs):
'''
Returns an access token.
:param method: A string representation of the HTTP method to be used,
defaults to `POST`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key: The key the access token will be decoded by, defaults to
'access_token'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
r = self.get_raw_access_token(method, **kwargs)
access_token, = process_token_request(r, decoder, key)
return access_token | python | def get_access_token(self,
method='POST',
decoder=parse_utf8_qsl,
key='access_token',
**kwargs):
'''
Returns an access token.
:param method: A string representation of the HTTP method to be used,
defaults to `POST`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key: The key the access token will be decoded by, defaults to
'access_token'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict
'''
r = self.get_raw_access_token(method, **kwargs)
access_token, = process_token_request(r, decoder, key)
return access_token | [
"def",
"get_access_token",
"(",
"self",
",",
"method",
"=",
"'POST'",
",",
"decoder",
"=",
"parse_utf8_qsl",
",",
"key",
"=",
"'access_token'",
",",
"*",
"*",
"kwargs",
")",
":",
"r",
"=",
"self",
".",
"get_raw_access_token",
"(",
"method",
",",
"*",
"*"... | Returns an access token.
:param method: A string representation of the HTTP method to be used,
defaults to `POST`.
:type method: str
:param decoder: A function used to parse the Response content. Should
return a dictionary.
:type decoder: func
:param key: The key the access token will be decoded by, defaults to
'access_token'.
:type string:
:param \*\*kwargs: Optional arguments. Same as Requests.
:type \*\*kwargs: dict | [
"Returns",
"an",
"access",
"token",
"."
] | a6d887d7737cf21ec896a8104f25c2754c694011 | https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/service.py#L521-L543 | train | 213,579 |
rochacbruno/flask_simplelogin | flask_simplelogin/__init__.py | is_logged_in | def is_logged_in(username=None):
"""Checks if user is logged in if `username`
is passed check if specified user is logged in
username can be a list"""
if username:
if not isinstance(username, (list, tuple)):
username = [username]
return 'simple_logged_in' in session and get_username() in username
return 'simple_logged_in' in session | python | def is_logged_in(username=None):
"""Checks if user is logged in if `username`
is passed check if specified user is logged in
username can be a list"""
if username:
if not isinstance(username, (list, tuple)):
username = [username]
return 'simple_logged_in' in session and get_username() in username
return 'simple_logged_in' in session | [
"def",
"is_logged_in",
"(",
"username",
"=",
"None",
")",
":",
"if",
"username",
":",
"if",
"not",
"isinstance",
"(",
"username",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"username",
"=",
"[",
"username",
"]",
"return",
"'simple_logged_in'",
"in",
... | Checks if user is logged in if `username`
is passed check if specified user is logged in
username can be a list | [
"Checks",
"if",
"user",
"is",
"logged",
"in",
"if",
"username",
"is",
"passed",
"check",
"if",
"specified",
"user",
"is",
"logged",
"in",
"username",
"can",
"be",
"a",
"list"
] | 5b319977053649352daa87a6b0632949eee0643c | https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/flask_simplelogin/__init__.py#L49-L57 | train | 213,580 |
rochacbruno/flask_simplelogin | flask_simplelogin/__init__.py | login_required | def login_required(function=None, username=None, basic=False, must=None):
"""Decorate views to require login
@login_required
@login_required()
@login_required(username='admin')
@login_required(username=['admin', 'jon'])
@login_required(basic=True)
@login_required(must=[function, another_function])
"""
if function and not callable(function):
raise ValueError(
'Decorator receives only named arguments, '
'try login_required(username="foo")'
)
def check(validators):
"""Return in the first validation error, else return None"""
if validators is None:
return
if not isinstance(validators, (list, tuple)):
validators = [validators]
for validator in validators:
error = validator(get_username())
if error is not None:
return SimpleLogin.get_message('auth_error', error), 403
def dispatch(fun, *args, **kwargs):
if basic and request.is_json:
return dispatch_basic_auth(fun, *args, **kwargs)
if is_logged_in(username=username):
return check(must) or fun(*args, **kwargs)
elif is_logged_in():
return SimpleLogin.get_message('access_denied'), 403
else:
flash(SimpleLogin.get_message('login_required'), 'warning')
return redirect(url_for('simplelogin.login', next=request.path))
def dispatch_basic_auth(fun, *args, **kwargs):
simplelogin = current_app.extensions['simplelogin']
auth_response = simplelogin.basic_auth()
if auth_response is True:
return check(must) or fun(*args, **kwargs)
else:
return auth_response
if function:
@wraps(function)
def simple_decorator(*args, **kwargs):
"""This is for when decorator is @login_required"""
return dispatch(function, *args, **kwargs)
return simple_decorator
def decorator(f):
"""This is for when decorator is @login_required(...)"""
@wraps(f)
def wrap(*args, **kwargs):
return dispatch(f, *args, **kwargs)
return wrap
return decorator | python | def login_required(function=None, username=None, basic=False, must=None):
"""Decorate views to require login
@login_required
@login_required()
@login_required(username='admin')
@login_required(username=['admin', 'jon'])
@login_required(basic=True)
@login_required(must=[function, another_function])
"""
if function and not callable(function):
raise ValueError(
'Decorator receives only named arguments, '
'try login_required(username="foo")'
)
def check(validators):
"""Return in the first validation error, else return None"""
if validators is None:
return
if not isinstance(validators, (list, tuple)):
validators = [validators]
for validator in validators:
error = validator(get_username())
if error is not None:
return SimpleLogin.get_message('auth_error', error), 403
def dispatch(fun, *args, **kwargs):
if basic and request.is_json:
return dispatch_basic_auth(fun, *args, **kwargs)
if is_logged_in(username=username):
return check(must) or fun(*args, **kwargs)
elif is_logged_in():
return SimpleLogin.get_message('access_denied'), 403
else:
flash(SimpleLogin.get_message('login_required'), 'warning')
return redirect(url_for('simplelogin.login', next=request.path))
def dispatch_basic_auth(fun, *args, **kwargs):
simplelogin = current_app.extensions['simplelogin']
auth_response = simplelogin.basic_auth()
if auth_response is True:
return check(must) or fun(*args, **kwargs)
else:
return auth_response
if function:
@wraps(function)
def simple_decorator(*args, **kwargs):
"""This is for when decorator is @login_required"""
return dispatch(function, *args, **kwargs)
return simple_decorator
def decorator(f):
"""This is for when decorator is @login_required(...)"""
@wraps(f)
def wrap(*args, **kwargs):
return dispatch(f, *args, **kwargs)
return wrap
return decorator | [
"def",
"login_required",
"(",
"function",
"=",
"None",
",",
"username",
"=",
"None",
",",
"basic",
"=",
"False",
",",
"must",
"=",
"None",
")",
":",
"if",
"function",
"and",
"not",
"callable",
"(",
"function",
")",
":",
"raise",
"ValueError",
"(",
"'De... | Decorate views to require login
@login_required
@login_required()
@login_required(username='admin')
@login_required(username=['admin', 'jon'])
@login_required(basic=True)
@login_required(must=[function, another_function]) | [
"Decorate",
"views",
"to",
"require",
"login"
] | 5b319977053649352daa87a6b0632949eee0643c | https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/flask_simplelogin/__init__.py#L65-L125 | train | 213,581 |
rochacbruno/flask_simplelogin | flask_simplelogin/__init__.py | SimpleLogin.get_message | def get_message(message, *args, **kwargs):
"""Helper to get internal messages outside this instance"""
msg = current_app.extensions['simplelogin'].messages.get(message)
if msg and (args or kwargs):
return msg.format(*args, **kwargs)
return msg | python | def get_message(message, *args, **kwargs):
"""Helper to get internal messages outside this instance"""
msg = current_app.extensions['simplelogin'].messages.get(message)
if msg and (args or kwargs):
return msg.format(*args, **kwargs)
return msg | [
"def",
"get_message",
"(",
"message",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"current_app",
".",
"extensions",
"[",
"'simplelogin'",
"]",
".",
"messages",
".",
"get",
"(",
"message",
")",
"if",
"msg",
"and",
"(",
"args",
"or... | Helper to get internal messages outside this instance | [
"Helper",
"to",
"get",
"internal",
"messages",
"outside",
"this",
"instance"
] | 5b319977053649352daa87a6b0632949eee0643c | https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/flask_simplelogin/__init__.py#L142-L147 | train | 213,582 |
rochacbruno/flask_simplelogin | example/simple_app.py | check_my_users | def check_my_users(user):
"""Check if user exists and its credentials.
Take a look at encrypt_app.py and encrypt_cli.py
to see how to encrypt passwords
"""
user_data = my_users.get(user['username'])
if not user_data:
return False # <--- invalid credentials
elif user_data.get('password') == user['password']:
return True # <--- user is logged in!
return False | python | def check_my_users(user):
"""Check if user exists and its credentials.
Take a look at encrypt_app.py and encrypt_cli.py
to see how to encrypt passwords
"""
user_data = my_users.get(user['username'])
if not user_data:
return False # <--- invalid credentials
elif user_data.get('password') == user['password']:
return True # <--- user is logged in!
return False | [
"def",
"check_my_users",
"(",
"user",
")",
":",
"user_data",
"=",
"my_users",
".",
"get",
"(",
"user",
"[",
"'username'",
"]",
")",
"if",
"not",
"user_data",
":",
"return",
"False",
"# <--- invalid credentials",
"elif",
"user_data",
".",
"get",
"(",
"'passwo... | Check if user exists and its credentials.
Take a look at encrypt_app.py and encrypt_cli.py
to see how to encrypt passwords | [
"Check",
"if",
"user",
"exists",
"and",
"its",
"credentials",
".",
"Take",
"a",
"look",
"at",
"encrypt_app",
".",
"py",
"and",
"encrypt_cli",
".",
"py",
"to",
"see",
"how",
"to",
"encrypt",
"passwords"
] | 5b319977053649352daa87a6b0632949eee0643c | https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/example/simple_app.py#L13-L24 | train | 213,583 |
rochacbruno/flask_simplelogin | example/manage.py | create_user | def create_user(**data):
"""Creates user with encrypted password"""
if 'username' not in data or 'password' not in data:
raise ValueError('username and password are required.')
# Hash the user password
data['password'] = generate_password_hash(
data.pop('password'),
method='pbkdf2:sha256'
)
# Here you insert the `data` in your users database
# for this simple example we are recording in a json file
db_users = json.load(open('users.json'))
# add the new created user to json
db_users[data['username']] = data
# commit changes to database
json.dump(db_users, open('users.json', 'w'))
return data | python | def create_user(**data):
"""Creates user with encrypted password"""
if 'username' not in data or 'password' not in data:
raise ValueError('username and password are required.')
# Hash the user password
data['password'] = generate_password_hash(
data.pop('password'),
method='pbkdf2:sha256'
)
# Here you insert the `data` in your users database
# for this simple example we are recording in a json file
db_users = json.load(open('users.json'))
# add the new created user to json
db_users[data['username']] = data
# commit changes to database
json.dump(db_users, open('users.json', 'w'))
return data | [
"def",
"create_user",
"(",
"*",
"*",
"data",
")",
":",
"if",
"'username'",
"not",
"in",
"data",
"or",
"'password'",
"not",
"in",
"data",
":",
"raise",
"ValueError",
"(",
"'username and password are required.'",
")",
"# Hash the user password",
"data",
"[",
"'pas... | Creates user with encrypted password | [
"Creates",
"user",
"with",
"encrypted",
"password"
] | 5b319977053649352daa87a6b0632949eee0643c | https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/example/manage.py#L23-L41 | train | 213,584 |
rochacbruno/flask_simplelogin | example/manage.py | with_app | def with_app(f):
"""Calls function passing app as first argument"""
@wraps(f)
def decorator(*args, **kwargs):
app = create_app()
configure_extensions(app)
configure_views(app)
return f(app=app, *args, **kwargs)
return decorator | python | def with_app(f):
"""Calls function passing app as first argument"""
@wraps(f)
def decorator(*args, **kwargs):
app = create_app()
configure_extensions(app)
configure_views(app)
return f(app=app, *args, **kwargs)
return decorator | [
"def",
"with_app",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"app",
"=",
"create_app",
"(",
")",
"configure_extensions",
"(",
"app",
")",
"configure_views",
"(",
"app",
... | Calls function passing app as first argument | [
"Calls",
"function",
"passing",
"app",
"as",
"first",
"argument"
] | 5b319977053649352daa87a6b0632949eee0643c | https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/example/manage.py#L83-L91 | train | 213,585 |
rochacbruno/flask_simplelogin | example/manage.py | adduser | def adduser(app, username, password):
"""Add new user with admin access"""
with app.app_context():
create_user(username=username, password=password)
click.echo('user created!') | python | def adduser(app, username, password):
"""Add new user with admin access"""
with app.app_context():
create_user(username=username, password=password)
click.echo('user created!') | [
"def",
"adduser",
"(",
"app",
",",
"username",
",",
"password",
")",
":",
"with",
"app",
".",
"app_context",
"(",
")",
":",
"create_user",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"click",
".",
"echo",
"(",
"'user created!... | Add new user with admin access | [
"Add",
"new",
"user",
"with",
"admin",
"access"
] | 5b319977053649352daa87a6b0632949eee0643c | https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/example/manage.py#L104-L108 | train | 213,586 |
dbrgn/RPLCD | RPLCD/i2c.py | CharLCD._pulse_data | def _pulse_data(self, value):
"""Pulse the `enable` flag to process value."""
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(1)
self.bus.write_byte(self._address, value | PCF8574_E | self._backlight)
c.usleep(1)
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(100)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data &= ~MCP230XX_DATAMASK
self._mcp_data |= value << MCP230XX_DATASHIFT
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data |= MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(100) | python | def _pulse_data(self, value):
"""Pulse the `enable` flag to process value."""
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(1)
self.bus.write_byte(self._address, value | PCF8574_E | self._backlight)
c.usleep(1)
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(100)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data &= ~MCP230XX_DATAMASK
self._mcp_data |= value << MCP230XX_DATASHIFT
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data |= MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(100) | [
"def",
"_pulse_data",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"_i2c_expander",
"==",
"'PCF8574'",
":",
"self",
".",
"bus",
".",
"write_byte",
"(",
"self",
".",
"_address",
",",
"(",
"(",
"value",
"&",
"~",
"PCF8574_E",
")",
"|",
"self... | Pulse the `enable` flag to process value. | [
"Pulse",
"the",
"enable",
"flag",
"to",
"process",
"value",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/i2c.py#L251-L271 | train | 213,587 |
dbrgn/RPLCD | RPLCD/gpio.py | CharLCD._write4bits | def _write4bits(self, value):
"""Write 4 bits of data into the data bus."""
for i in range(4):
bit = (value >> i) & 0x01
GPIO.output(self.pins[i + 7], bit)
self._pulse_enable() | python | def _write4bits(self, value):
"""Write 4 bits of data into the data bus."""
for i in range(4):
bit = (value >> i) & 0x01
GPIO.output(self.pins[i + 7], bit)
self._pulse_enable() | [
"def",
"_write4bits",
"(",
"self",
",",
"value",
")",
":",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"bit",
"=",
"(",
"value",
">>",
"i",
")",
"&",
"0x01",
"GPIO",
".",
"output",
"(",
"self",
".",
"pins",
"[",
"i",
"+",
"7",
"]",
",",
... | Write 4 bits of data into the data bus. | [
"Write",
"4",
"bits",
"of",
"data",
"into",
"the",
"data",
"bus",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/gpio.py#L223-L228 | train | 213,588 |
dbrgn/RPLCD | RPLCD/gpio.py | CharLCD._pulse_enable | def _pulse_enable(self):
"""Pulse the `enable` flag to process data."""
GPIO.output(self.pins.e, 0)
c.usleep(1)
GPIO.output(self.pins.e, 1)
c.usleep(1)
GPIO.output(self.pins.e, 0)
c.usleep(100) | python | def _pulse_enable(self):
"""Pulse the `enable` flag to process data."""
GPIO.output(self.pins.e, 0)
c.usleep(1)
GPIO.output(self.pins.e, 1)
c.usleep(1)
GPIO.output(self.pins.e, 0)
c.usleep(100) | [
"def",
"_pulse_enable",
"(",
"self",
")",
":",
"GPIO",
".",
"output",
"(",
"self",
".",
"pins",
".",
"e",
",",
"0",
")",
"c",
".",
"usleep",
"(",
"1",
")",
"GPIO",
".",
"output",
"(",
"self",
".",
"pins",
".",
"e",
",",
"1",
")",
"c",
".",
... | Pulse the `enable` flag to process data. | [
"Pulse",
"the",
"enable",
"flag",
"to",
"process",
"data",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/gpio.py#L237-L244 | train | 213,589 |
dbrgn/RPLCD | RPLCD/lcd.py | BaseCharLCD.write_string | def write_string(self, value):
"""
Write the specified unicode string to the display.
To control multiline behavior, use newline (``\\n``) and carriage
return (``\\r``) characters.
Lines that are too long automatically continue on next line, as long as
``auto_linebreaks`` has not been disabled.
Make sure that you're only passing unicode objects to this function.
The unicode string is then converted to the correct LCD encoding by
using the charmap specified at instantiation time.
If you're dealing with bytestrings (the default string type in Python
2), convert it to a unicode object using the ``.decode(encoding)``
method and the appropriate encoding. Example for UTF-8 encoded strings:
.. code::
>>> bstring = 'Temperature: 30°C'
>>> bstring
'Temperature: 30\xc2\xb0C'
>>> bstring.decode('utf-8')
u'Temperature: 30\xb0C'
"""
encoded = self.codec.encode(value) # type: List[int]
ignored = False
for [char, lookahead] in c.sliding_window(encoded, lookahead=1):
# If the previous character has been ignored, skip this one too.
if ignored is True:
ignored = False
continue
# Write regular chars
if char not in [codecs.CR, codecs.LF]:
self.write(char)
continue
# We're now left with only CR and LF characters. If an auto
# linebreak happened recently, and the lookahead matches too,
# ignore this write.
if self.recent_auto_linebreak is True:
crlf = (char == codecs.CR and lookahead == codecs.LF)
lfcr = (char == codecs.LF and lookahead == codecs.CR)
if crlf or lfcr:
ignored = True
continue
# Handle newlines and carriage returns
row, col = self.cursor_pos
if char == codecs.LF:
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, col)
else:
self.cursor_pos = (0, col)
elif char == codecs.CR:
if self.text_align_mode == 'left':
self.cursor_pos = (row, 0)
else:
self.cursor_pos = (row, self.lcd.cols - 1) | python | def write_string(self, value):
"""
Write the specified unicode string to the display.
To control multiline behavior, use newline (``\\n``) and carriage
return (``\\r``) characters.
Lines that are too long automatically continue on next line, as long as
``auto_linebreaks`` has not been disabled.
Make sure that you're only passing unicode objects to this function.
The unicode string is then converted to the correct LCD encoding by
using the charmap specified at instantiation time.
If you're dealing with bytestrings (the default string type in Python
2), convert it to a unicode object using the ``.decode(encoding)``
method and the appropriate encoding. Example for UTF-8 encoded strings:
.. code::
>>> bstring = 'Temperature: 30°C'
>>> bstring
'Temperature: 30\xc2\xb0C'
>>> bstring.decode('utf-8')
u'Temperature: 30\xb0C'
"""
encoded = self.codec.encode(value) # type: List[int]
ignored = False
for [char, lookahead] in c.sliding_window(encoded, lookahead=1):
# If the previous character has been ignored, skip this one too.
if ignored is True:
ignored = False
continue
# Write regular chars
if char not in [codecs.CR, codecs.LF]:
self.write(char)
continue
# We're now left with only CR and LF characters. If an auto
# linebreak happened recently, and the lookahead matches too,
# ignore this write.
if self.recent_auto_linebreak is True:
crlf = (char == codecs.CR and lookahead == codecs.LF)
lfcr = (char == codecs.LF and lookahead == codecs.CR)
if crlf or lfcr:
ignored = True
continue
# Handle newlines and carriage returns
row, col = self.cursor_pos
if char == codecs.LF:
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, col)
else:
self.cursor_pos = (0, col)
elif char == codecs.CR:
if self.text_align_mode == 'left':
self.cursor_pos = (row, 0)
else:
self.cursor_pos = (row, self.lcd.cols - 1) | [
"def",
"write_string",
"(",
"self",
",",
"value",
")",
":",
"encoded",
"=",
"self",
".",
"codec",
".",
"encode",
"(",
"value",
")",
"# type: List[int]",
"ignored",
"=",
"False",
"for",
"[",
"char",
",",
"lookahead",
"]",
"in",
"c",
".",
"sliding_window",... | Write the specified unicode string to the display.
To control multiline behavior, use newline (``\\n``) and carriage
return (``\\r``) characters.
Lines that are too long automatically continue on next line, as long as
``auto_linebreaks`` has not been disabled.
Make sure that you're only passing unicode objects to this function.
The unicode string is then converted to the correct LCD encoding by
using the charmap specified at instantiation time.
If you're dealing with bytestrings (the default string type in Python
2), convert it to a unicode object using the ``.decode(encoding)``
method and the appropriate encoding. Example for UTF-8 encoded strings:
.. code::
>>> bstring = 'Temperature: 30°C'
>>> bstring
'Temperature: 30\xc2\xb0C'
>>> bstring.decode('utf-8')
u'Temperature: 30\xb0C' | [
"Write",
"the",
"specified",
"unicode",
"string",
"to",
"the",
"display",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/lcd.py#L244-L307 | train | 213,590 |
dbrgn/RPLCD | RPLCD/lcd.py | BaseCharLCD.clear | def clear(self):
"""Overwrite display with blank characters and reset cursor position."""
self.command(c.LCD_CLEARDISPLAY)
self._cursor_pos = (0, 0)
self._content = [[0x20] * self.lcd.cols for _ in range(self.lcd.rows)]
c.msleep(2) | python | def clear(self):
"""Overwrite display with blank characters and reset cursor position."""
self.command(c.LCD_CLEARDISPLAY)
self._cursor_pos = (0, 0)
self._content = [[0x20] * self.lcd.cols for _ in range(self.lcd.rows)]
c.msleep(2) | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"command",
"(",
"c",
".",
"LCD_CLEARDISPLAY",
")",
"self",
".",
"_cursor_pos",
"=",
"(",
"0",
",",
"0",
")",
"self",
".",
"_content",
"=",
"[",
"[",
"0x20",
"]",
"*",
"self",
".",
"lcd",
".",
... | Overwrite display with blank characters and reset cursor position. | [
"Overwrite",
"display",
"with",
"blank",
"characters",
"and",
"reset",
"cursor",
"position",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/lcd.py#L309-L314 | train | 213,591 |
dbrgn/RPLCD | RPLCD/lcd.py | BaseCharLCD.home | def home(self):
"""Set cursor to initial position and reset any shifting."""
self.command(c.LCD_RETURNHOME)
self._cursor_pos = (0, 0)
c.msleep(2) | python | def home(self):
"""Set cursor to initial position and reset any shifting."""
self.command(c.LCD_RETURNHOME)
self._cursor_pos = (0, 0)
c.msleep(2) | [
"def",
"home",
"(",
"self",
")",
":",
"self",
".",
"command",
"(",
"c",
".",
"LCD_RETURNHOME",
")",
"self",
".",
"_cursor_pos",
"=",
"(",
"0",
",",
"0",
")",
"c",
".",
"msleep",
"(",
"2",
")"
] | Set cursor to initial position and reset any shifting. | [
"Set",
"cursor",
"to",
"initial",
"position",
"and",
"reset",
"any",
"shifting",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/lcd.py#L316-L320 | train | 213,592 |
dbrgn/RPLCD | RPLCD/lcd.py | BaseCharLCD.shift_display | def shift_display(self, amount):
"""Shift the display. Use negative amounts to shift left and positive
amounts to shift right."""
if amount == 0:
return
direction = c.LCD_MOVERIGHT if amount > 0 else c.LCD_MOVELEFT
for i in range(abs(amount)):
self.command(c.LCD_CURSORSHIFT | c.LCD_DISPLAYMOVE | direction)
c.usleep(50) | python | def shift_display(self, amount):
"""Shift the display. Use negative amounts to shift left and positive
amounts to shift right."""
if amount == 0:
return
direction = c.LCD_MOVERIGHT if amount > 0 else c.LCD_MOVELEFT
for i in range(abs(amount)):
self.command(c.LCD_CURSORSHIFT | c.LCD_DISPLAYMOVE | direction)
c.usleep(50) | [
"def",
"shift_display",
"(",
"self",
",",
"amount",
")",
":",
"if",
"amount",
"==",
"0",
":",
"return",
"direction",
"=",
"c",
".",
"LCD_MOVERIGHT",
"if",
"amount",
">",
"0",
"else",
"c",
".",
"LCD_MOVELEFT",
"for",
"i",
"in",
"range",
"(",
"abs",
"(... | Shift the display. Use negative amounts to shift left and positive
amounts to shift right. | [
"Shift",
"the",
"display",
".",
"Use",
"negative",
"amounts",
"to",
"shift",
"left",
"and",
"positive",
"amounts",
"to",
"shift",
"right",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/lcd.py#L322-L330 | train | 213,593 |
dbrgn/RPLCD | RPLCD/lcd.py | BaseCharLCD.write | def write(self, value): # type: (int) -> None
"""Write a raw byte to the LCD."""
# Get current position
row, col = self._cursor_pos
# Write byte if changed
try:
if self._content[row][col] != value:
self._send_data(value)
self._content[row][col] = value # Update content cache
unchanged = False
else:
unchanged = True
except IndexError as e:
# Position out of range
if self.auto_linebreaks is True:
raise e
self._send_data(value)
unchanged = False
# Update cursor position.
if self.text_align_mode == 'left':
if self.auto_linebreaks is False or col < self.lcd.cols - 1:
# No newline, update internal pointer
newpos = (row, col + 1)
if unchanged:
self.cursor_pos = newpos
else:
self._cursor_pos = newpos
self.recent_auto_linebreak = False
else:
# Newline, reset pointer
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, 0)
else:
self.cursor_pos = (0, 0)
self.recent_auto_linebreak = True
else:
if self.auto_linebreaks is False or col > 0:
# No newline, update internal pointer
newpos = (row, col - 1)
if unchanged:
self.cursor_pos = newpos
else:
self._cursor_pos = newpos
self.recent_auto_linebreak = False
else:
# Newline, reset pointer
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, self.lcd.cols - 1)
else:
self.cursor_pos = (0, self.lcd.cols - 1)
self.recent_auto_linebreak = True | python | def write(self, value): # type: (int) -> None
"""Write a raw byte to the LCD."""
# Get current position
row, col = self._cursor_pos
# Write byte if changed
try:
if self._content[row][col] != value:
self._send_data(value)
self._content[row][col] = value # Update content cache
unchanged = False
else:
unchanged = True
except IndexError as e:
# Position out of range
if self.auto_linebreaks is True:
raise e
self._send_data(value)
unchanged = False
# Update cursor position.
if self.text_align_mode == 'left':
if self.auto_linebreaks is False or col < self.lcd.cols - 1:
# No newline, update internal pointer
newpos = (row, col + 1)
if unchanged:
self.cursor_pos = newpos
else:
self._cursor_pos = newpos
self.recent_auto_linebreak = False
else:
# Newline, reset pointer
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, 0)
else:
self.cursor_pos = (0, 0)
self.recent_auto_linebreak = True
else:
if self.auto_linebreaks is False or col > 0:
# No newline, update internal pointer
newpos = (row, col - 1)
if unchanged:
self.cursor_pos = newpos
else:
self._cursor_pos = newpos
self.recent_auto_linebreak = False
else:
# Newline, reset pointer
if row < self.lcd.rows - 1:
self.cursor_pos = (row + 1, self.lcd.cols - 1)
else:
self.cursor_pos = (0, self.lcd.cols - 1)
self.recent_auto_linebreak = True | [
"def",
"write",
"(",
"self",
",",
"value",
")",
":",
"# type: (int) -> None",
"# Get current position",
"row",
",",
"col",
"=",
"self",
".",
"_cursor_pos",
"# Write byte if changed",
"try",
":",
"if",
"self",
".",
"_content",
"[",
"row",
"]",
"[",
"col",
"]"... | Write a raw byte to the LCD. | [
"Write",
"a",
"raw",
"byte",
"to",
"the",
"LCD",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/lcd.py#L383-L436 | train | 213,594 |
dbrgn/RPLCD | RPLCD/contextmanagers.py | cursor | def cursor(lcd, row, col):
"""
Context manager to control cursor position. DEPRECATED.
"""
warnings.warn('The `cursor` context manager is deprecated', DeprecationWarning)
lcd.cursor_pos = (row, col)
yield | python | def cursor(lcd, row, col):
"""
Context manager to control cursor position. DEPRECATED.
"""
warnings.warn('The `cursor` context manager is deprecated', DeprecationWarning)
lcd.cursor_pos = (row, col)
yield | [
"def",
"cursor",
"(",
"lcd",
",",
"row",
",",
"col",
")",
":",
"warnings",
".",
"warn",
"(",
"'The `cursor` context manager is deprecated'",
",",
"DeprecationWarning",
")",
"lcd",
".",
"cursor_pos",
"=",
"(",
"row",
",",
"col",
")",
"yield"
] | Context manager to control cursor position. DEPRECATED. | [
"Context",
"manager",
"to",
"control",
"cursor",
"position",
".",
"DEPRECATED",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/contextmanagers.py#L9-L15 | train | 213,595 |
dbrgn/RPLCD | RPLCD/common.py | sliding_window | def sliding_window(seq, lookahead):
"""
Create a sliding window with the specified number of lookahead characters.
"""
it = itertools.chain(iter(seq), ' ' * lookahead) # Padded iterator
window_size = lookahead + 1
result = tuple(itertools.islice(it, window_size))
if len(result) == window_size:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result | python | def sliding_window(seq, lookahead):
"""
Create a sliding window with the specified number of lookahead characters.
"""
it = itertools.chain(iter(seq), ' ' * lookahead) # Padded iterator
window_size = lookahead + 1
result = tuple(itertools.islice(it, window_size))
if len(result) == window_size:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result | [
"def",
"sliding_window",
"(",
"seq",
",",
"lookahead",
")",
":",
"it",
"=",
"itertools",
".",
"chain",
"(",
"iter",
"(",
"seq",
")",
",",
"' '",
"*",
"lookahead",
")",
"# Padded iterator",
"window_size",
"=",
"lookahead",
"+",
"1",
"result",
"=",
"tuple"... | Create a sliding window with the specified number of lookahead characters. | [
"Create",
"a",
"sliding",
"window",
"with",
"the",
"specified",
"number",
"of",
"lookahead",
"characters",
"."
] | 95fe5da1354d466d661cdc84e1637ce557700c8c | https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/common.py#L108-L119 | train | 213,596 |
rgs1/zk_shell | zk_shell/cli.py | get_params | def get_params():
""" get the cmdline params """
parser = argparse.ArgumentParser()
parser.add_argument("--connect-timeout",
type=float,
default=10.0,
help="ZK connect timeout")
parser.add_argument("--run-once",
type=str,
default="",
help="Run a command non-interactively and exit")
parser.add_argument("--run-from-stdin",
action="store_true",
default=False,
help="Read cmds from stdin, run them and exit")
parser.add_argument("--sync-connect",
action="store_true",
default=False,
help="Connect synchronously.")
parser.add_argument("--readonly",
action="store_true",
default=False,
help="Enable readonly.")
parser.add_argument("--tunnel",
type=str,
help="Create a ssh tunnel via this host",
default=None)
parser.add_argument("--version",
action="store_true",
default=False,
help="Display version and exit.")
parser.add_argument("hosts",
nargs="*",
help="ZK hosts to connect")
params = parser.parse_args()
return CLIParams(
params.connect_timeout,
params.run_once,
params.run_from_stdin,
params.sync_connect,
params.hosts,
params.readonly,
params.tunnel,
params.version
) | python | def get_params():
""" get the cmdline params """
parser = argparse.ArgumentParser()
parser.add_argument("--connect-timeout",
type=float,
default=10.0,
help="ZK connect timeout")
parser.add_argument("--run-once",
type=str,
default="",
help="Run a command non-interactively and exit")
parser.add_argument("--run-from-stdin",
action="store_true",
default=False,
help="Read cmds from stdin, run them and exit")
parser.add_argument("--sync-connect",
action="store_true",
default=False,
help="Connect synchronously.")
parser.add_argument("--readonly",
action="store_true",
default=False,
help="Enable readonly.")
parser.add_argument("--tunnel",
type=str,
help="Create a ssh tunnel via this host",
default=None)
parser.add_argument("--version",
action="store_true",
default=False,
help="Display version and exit.")
parser.add_argument("hosts",
nargs="*",
help="ZK hosts to connect")
params = parser.parse_args()
return CLIParams(
params.connect_timeout,
params.run_once,
params.run_from_stdin,
params.sync_connect,
params.hosts,
params.readonly,
params.tunnel,
params.version
) | [
"def",
"get_params",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"\"--connect-timeout\"",
",",
"type",
"=",
"float",
",",
"default",
"=",
"10.0",
",",
"help",
"=",
"\"ZK connect timeout\"",
"... | get the cmdline params | [
"get",
"the",
"cmdline",
"params"
] | bbf34fdfcf1f81100e2a5816fad8af6afc782a54 | https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/cli.py#L39-L83 | train | 213,597 |
rgs1/zk_shell | zk_shell/cli.py | set_unbuffered_mode | def set_unbuffered_mode():
"""
make output unbuffered
"""
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout) | python | def set_unbuffered_mode():
"""
make output unbuffered
"""
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout) | [
"def",
"set_unbuffered_mode",
"(",
")",
":",
"class",
"Unbuffered",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"stream",
")",
":",
"self",
".",
"stream",
"=",
"stream",
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"self",
".... | make output unbuffered | [
"make",
"output",
"unbuffered"
] | bbf34fdfcf1f81100e2a5816fad8af6afc782a54 | https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/cli.py#L97-L110 | train | 213,598 |
rgs1/zk_shell | zk_shell/acl.py | ACLReader.to_dict | def to_dict(cls, acl):
""" transform an ACL to a dict """
return {
"perms": acl.perms,
"id": {
"scheme": acl.id.scheme,
"id": acl.id.id
}
} | python | def to_dict(cls, acl):
""" transform an ACL to a dict """
return {
"perms": acl.perms,
"id": {
"scheme": acl.id.scheme,
"id": acl.id.id
}
} | [
"def",
"to_dict",
"(",
"cls",
",",
"acl",
")",
":",
"return",
"{",
"\"perms\"",
":",
"acl",
".",
"perms",
",",
"\"id\"",
":",
"{",
"\"scheme\"",
":",
"acl",
".",
"id",
".",
"scheme",
",",
"\"id\"",
":",
"acl",
".",
"id",
".",
"id",
"}",
"}"
] | transform an ACL to a dict | [
"transform",
"an",
"ACL",
"to",
"a",
"dict"
] | bbf34fdfcf1f81100e2a5816fad8af6afc782a54 | https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/acl.py#L74-L82 | train | 213,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.