repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
floydhub/floyd-cli | floyd/cli/auth.py | login | def login(token, apikey, username, password):
"""
Login to FloydHub.
"""
if manual_login_success(token, username, password):
return
if not apikey:
if has_browser():
apikey = wait_for_apikey()
else:
floyd_logger.error(
"No browser found, please login manually by creating login key at %s/settings/apikey.",
floyd.floyd_web_host)
sys.exit(1)
if apikey:
user = AuthClient().get_user(apikey, is_apikey=True)
AuthConfigManager.set_apikey(username=user.username, apikey=apikey)
floyd_logger.info("Login Successful as %s", user.username)
else:
floyd_logger.error("Login failed, please see --help for other login options.") | python | def login(token, apikey, username, password):
"""
Login to FloydHub.
"""
if manual_login_success(token, username, password):
return
if not apikey:
if has_browser():
apikey = wait_for_apikey()
else:
floyd_logger.error(
"No browser found, please login manually by creating login key at %s/settings/apikey.",
floyd.floyd_web_host)
sys.exit(1)
if apikey:
user = AuthClient().get_user(apikey, is_apikey=True)
AuthConfigManager.set_apikey(username=user.username, apikey=apikey)
floyd_logger.info("Login Successful as %s", user.username)
else:
floyd_logger.error("Login failed, please see --help for other login options.") | [
"def",
"login",
"(",
"token",
",",
"apikey",
",",
"username",
",",
"password",
")",
":",
"if",
"manual_login_success",
"(",
"token",
",",
"username",
",",
"password",
")",
":",
"return",
"if",
"not",
"apikey",
":",
"if",
"has_browser",
"(",
")",
":",
"... | Login to FloydHub. | [
"Login",
"to",
"FloydHub",
"."
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/cli/auth.py#L77-L98 | train | 23,200 |
floydhub/floyd-cli | floyd/main.py | check_cli_version | def check_cli_version():
"""
Check if the current cli version satisfies the server requirements
"""
should_exit = False
server_version = VersionClient().get_cli_version()
current_version = get_cli_version()
if LooseVersion(current_version) < LooseVersion(server_version.min_version):
print("\nYour version of CLI (%s) is no longer compatible with server." % current_version)
should_exit = True
elif LooseVersion(current_version) < LooseVersion(server_version.latest_version):
print("\nNew version of CLI (%s) is now available." % server_version.latest_version)
else:
return
# new version is ready
if should_exit and click.confirm('\nDo you want to upgrade to version %s now?' % server_version.latest_version):
auto_upgrade()
sys.exit(0)
else:
msg_parts = []
msg_parts.append("\nTo manually upgrade run:")
msg_parts.append(" pip install -U floyd-cli")
if is_conda_env():
msg_parts.append("Or if you prefer to use conda:")
msg_parts.append(" conda install -y -c conda-forge -c floydhub floyd-cli")
print("\n".join(msg_parts))
print("")
if should_exit:
sys.exit(0) | python | def check_cli_version():
"""
Check if the current cli version satisfies the server requirements
"""
should_exit = False
server_version = VersionClient().get_cli_version()
current_version = get_cli_version()
if LooseVersion(current_version) < LooseVersion(server_version.min_version):
print("\nYour version of CLI (%s) is no longer compatible with server." % current_version)
should_exit = True
elif LooseVersion(current_version) < LooseVersion(server_version.latest_version):
print("\nNew version of CLI (%s) is now available." % server_version.latest_version)
else:
return
# new version is ready
if should_exit and click.confirm('\nDo you want to upgrade to version %s now?' % server_version.latest_version):
auto_upgrade()
sys.exit(0)
else:
msg_parts = []
msg_parts.append("\nTo manually upgrade run:")
msg_parts.append(" pip install -U floyd-cli")
if is_conda_env():
msg_parts.append("Or if you prefer to use conda:")
msg_parts.append(" conda install -y -c conda-forge -c floydhub floyd-cli")
print("\n".join(msg_parts))
print("")
if should_exit:
sys.exit(0) | [
"def",
"check_cli_version",
"(",
")",
":",
"should_exit",
"=",
"False",
"server_version",
"=",
"VersionClient",
"(",
")",
".",
"get_cli_version",
"(",
")",
"current_version",
"=",
"get_cli_version",
"(",
")",
"if",
"LooseVersion",
"(",
"current_version",
")",
"<... | Check if the current cli version satisfies the server requirements | [
"Check",
"if",
"the",
"current",
"cli",
"version",
"satisfies",
"the",
"server",
"requirements"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/main.py#L36-L67 | train | 23,201 |
floydhub/floyd-cli | floyd/client/base.py | FloydHttpClient.request | def request(self,
method,
url,
params=None,
data=None,
files=None,
json=None,
timeout=5,
headers=None,
skip_auth=False):
"""
Execute the request using requests library
"""
request_url = self.base_url + url
floyd_logger.debug("Starting request to url: %s with params: %s, data: %s", request_url, params, data)
request_headers = {'x-floydhub-cli-version': get_cli_version()}
# Auth headers if present
if self.auth_header:
request_headers["Authorization"] = self.auth_header
# Add any additional headers
if headers:
request_headers.update(headers)
try:
response = requests.request(method,
request_url,
params=params,
data=data,
json=json,
headers=request_headers,
files=files,
timeout=timeout)
except requests.exceptions.ConnectionError as exception:
floyd_logger.debug("Exception: %s", exception, exc_info=True)
sys.exit("Cannot connect to the Floyd server. Check your internet connection.")
except requests.exceptions.Timeout as exception:
floyd_logger.debug("Exception: %s", exception, exc_info=True)
sys.exit("Connection to FloydHub server timed out. Please retry or check your internet connection.")
floyd_logger.debug("Response Content: %s, Headers: %s" % (response.content, response.headers))
self.check_response_status(response)
return response | python | def request(self,
method,
url,
params=None,
data=None,
files=None,
json=None,
timeout=5,
headers=None,
skip_auth=False):
"""
Execute the request using requests library
"""
request_url = self.base_url + url
floyd_logger.debug("Starting request to url: %s with params: %s, data: %s", request_url, params, data)
request_headers = {'x-floydhub-cli-version': get_cli_version()}
# Auth headers if present
if self.auth_header:
request_headers["Authorization"] = self.auth_header
# Add any additional headers
if headers:
request_headers.update(headers)
try:
response = requests.request(method,
request_url,
params=params,
data=data,
json=json,
headers=request_headers,
files=files,
timeout=timeout)
except requests.exceptions.ConnectionError as exception:
floyd_logger.debug("Exception: %s", exception, exc_info=True)
sys.exit("Cannot connect to the Floyd server. Check your internet connection.")
except requests.exceptions.Timeout as exception:
floyd_logger.debug("Exception: %s", exception, exc_info=True)
sys.exit("Connection to FloydHub server timed out. Please retry or check your internet connection.")
floyd_logger.debug("Response Content: %s, Headers: %s" % (response.content, response.headers))
self.check_response_status(response)
return response | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"url",
",",
"params",
"=",
"None",
",",
"data",
"=",
"None",
",",
"files",
"=",
"None",
",",
"json",
"=",
"None",
",",
"timeout",
"=",
"5",
",",
"headers",
"=",
"None",
",",
"skip_auth",
"=",
"Fa... | Execute the request using requests library | [
"Execute",
"the",
"request",
"using",
"requests",
"library"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/base.py#L30-L72 | train | 23,202 |
floydhub/floyd-cli | floyd/client/base.py | FloydHttpClient.download | def download(self, url, filename, relative=False, headers=None, timeout=5):
"""
Download the file from the given url at the current path
"""
request_url = self.base_url + url if relative else url
floyd_logger.debug("Downloading file from url: {}".format(request_url))
# Auth headers if present
request_headers = {}
if self.auth_header:
request_headers["Authorization"] = self.auth_header
# Add any additional headers
if headers:
request_headers.update(headers)
try:
response = requests.get(request_url,
headers=request_headers,
timeout=timeout,
stream=True)
self.check_response_status(response)
with open(filename, 'wb') as f:
# chunk mode response doesn't have content-length so we are
# using a custom header here
content_length = response.headers.get('x-floydhub-content-length')
if not content_length:
content_length = response.headers.get('content-length')
if content_length:
for chunk in progress.bar(response.iter_content(chunk_size=1024),
expected_size=(int(content_length) / 1024) + 1):
if chunk:
f.write(chunk)
else:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return filename
except requests.exceptions.ConnectionError as exception:
floyd_logger.debug("Exception: {}".format(exception))
sys.exit("Cannot connect to the Floyd server. Check your internet connection.") | python | def download(self, url, filename, relative=False, headers=None, timeout=5):
"""
Download the file from the given url at the current path
"""
request_url = self.base_url + url if relative else url
floyd_logger.debug("Downloading file from url: {}".format(request_url))
# Auth headers if present
request_headers = {}
if self.auth_header:
request_headers["Authorization"] = self.auth_header
# Add any additional headers
if headers:
request_headers.update(headers)
try:
response = requests.get(request_url,
headers=request_headers,
timeout=timeout,
stream=True)
self.check_response_status(response)
with open(filename, 'wb') as f:
# chunk mode response doesn't have content-length so we are
# using a custom header here
content_length = response.headers.get('x-floydhub-content-length')
if not content_length:
content_length = response.headers.get('content-length')
if content_length:
for chunk in progress.bar(response.iter_content(chunk_size=1024),
expected_size=(int(content_length) / 1024) + 1):
if chunk:
f.write(chunk)
else:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return filename
except requests.exceptions.ConnectionError as exception:
floyd_logger.debug("Exception: {}".format(exception))
sys.exit("Cannot connect to the Floyd server. Check your internet connection.") | [
"def",
"download",
"(",
"self",
",",
"url",
",",
"filename",
",",
"relative",
"=",
"False",
",",
"headers",
"=",
"None",
",",
"timeout",
"=",
"5",
")",
":",
"request_url",
"=",
"self",
".",
"base_url",
"+",
"url",
"if",
"relative",
"else",
"url",
"fl... | Download the file from the given url at the current path | [
"Download",
"the",
"file",
"from",
"the",
"given",
"url",
"at",
"the",
"current",
"path"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/base.py#L74-L113 | train | 23,203 |
floydhub/floyd-cli | floyd/client/base.py | FloydHttpClient.download_tar | def download_tar(self, url, untar=True, delete_after_untar=False, destination_dir='.'):
"""
Download and optionally untar the tar file from the given url
"""
try:
floyd_logger.info("Downloading the tar file to the current directory ...")
filename = self.download(url=url, filename='output.tar')
if filename and untar:
floyd_logger.info("Untarring the contents of the file ...")
tar = tarfile.open(filename)
tar.extractall(path=destination_dir)
tar.close()
if delete_after_untar:
floyd_logger.info("Cleaning up the tar file ...")
os.remove(filename)
return filename
except FloydException as e:
floyd_logger.info("Download URL ERROR! {}".format(e.message))
return False | python | def download_tar(self, url, untar=True, delete_after_untar=False, destination_dir='.'):
"""
Download and optionally untar the tar file from the given url
"""
try:
floyd_logger.info("Downloading the tar file to the current directory ...")
filename = self.download(url=url, filename='output.tar')
if filename and untar:
floyd_logger.info("Untarring the contents of the file ...")
tar = tarfile.open(filename)
tar.extractall(path=destination_dir)
tar.close()
if delete_after_untar:
floyd_logger.info("Cleaning up the tar file ...")
os.remove(filename)
return filename
except FloydException as e:
floyd_logger.info("Download URL ERROR! {}".format(e.message))
return False | [
"def",
"download_tar",
"(",
"self",
",",
"url",
",",
"untar",
"=",
"True",
",",
"delete_after_untar",
"=",
"False",
",",
"destination_dir",
"=",
"'.'",
")",
":",
"try",
":",
"floyd_logger",
".",
"info",
"(",
"\"Downloading the tar file to the current directory ...... | Download and optionally untar the tar file from the given url | [
"Download",
"and",
"optionally",
"untar",
"the",
"tar",
"file",
"from",
"the",
"given",
"url"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/base.py#L115-L133 | train | 23,204 |
floydhub/floyd-cli | floyd/client/base.py | FloydHttpClient.check_response_status | def check_response_status(self, response):
"""
Check if response is successful. Else raise Exception.
"""
if not (200 <= response.status_code < 300):
try:
message = response.json()["errors"]
except Exception:
message = None
floyd_logger.debug("Error received : status_code: {}, message: {}".format(response.status_code,
message or response.content))
if response.status_code == 400:
raise BadRequestException(response)
elif response.status_code == 401:
raise AuthenticationException()
elif response.status_code == 403:
raise AuthorizationException(response)
elif response.status_code == 404:
raise NotFoundException()
elif response.status_code == 429:
raise OverLimitException(response.json().get("message"))
elif response.status_code == 502:
raise BadGatewayException()
elif response.status_code == 504:
raise GatewayTimeoutException()
elif response.status_code == 423:
raise LockedException()
elif 500 <= response.status_code < 600:
if 'Server under maintenance' in response.content.decode():
raise ServerException('Server under maintenance, please try again later.')
else:
raise ServerException()
else:
msg = "An error occurred. Server response: {}".format(response.status_code)
raise FloydException(message=msg) | python | def check_response_status(self, response):
"""
Check if response is successful. Else raise Exception.
"""
if not (200 <= response.status_code < 300):
try:
message = response.json()["errors"]
except Exception:
message = None
floyd_logger.debug("Error received : status_code: {}, message: {}".format(response.status_code,
message or response.content))
if response.status_code == 400:
raise BadRequestException(response)
elif response.status_code == 401:
raise AuthenticationException()
elif response.status_code == 403:
raise AuthorizationException(response)
elif response.status_code == 404:
raise NotFoundException()
elif response.status_code == 429:
raise OverLimitException(response.json().get("message"))
elif response.status_code == 502:
raise BadGatewayException()
elif response.status_code == 504:
raise GatewayTimeoutException()
elif response.status_code == 423:
raise LockedException()
elif 500 <= response.status_code < 600:
if 'Server under maintenance' in response.content.decode():
raise ServerException('Server under maintenance, please try again later.')
else:
raise ServerException()
else:
msg = "An error occurred. Server response: {}".format(response.status_code)
raise FloydException(message=msg) | [
"def",
"check_response_status",
"(",
"self",
",",
"response",
")",
":",
"if",
"not",
"(",
"200",
"<=",
"response",
".",
"status_code",
"<",
"300",
")",
":",
"try",
":",
"message",
"=",
"response",
".",
"json",
"(",
")",
"[",
"\"errors\"",
"]",
"except"... | Check if response is successful. Else raise Exception. | [
"Check",
"if",
"response",
"is",
"successful",
".",
"Else",
"raise",
"Exception",
"."
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/base.py#L135-L169 | train | 23,205 |
floydhub/floyd-cli | floyd/development/dev.py | cli | def cli(verbose):
"""
Floyd CLI interacts with FloydHub server and executes your commands.
More help is available under each command listed below.
"""
floyd.floyd_host = floyd.floyd_web_host = "https://dev.floydhub.com"
floyd.tus_server_endpoint = "https://upload-v2-dev.floydhub.com/api/v1/upload/"
configure_logger(verbose)
check_cli_version() | python | def cli(verbose):
"""
Floyd CLI interacts with FloydHub server and executes your commands.
More help is available under each command listed below.
"""
floyd.floyd_host = floyd.floyd_web_host = "https://dev.floydhub.com"
floyd.tus_server_endpoint = "https://upload-v2-dev.floydhub.com/api/v1/upload/"
configure_logger(verbose)
check_cli_version() | [
"def",
"cli",
"(",
"verbose",
")",
":",
"floyd",
".",
"floyd_host",
"=",
"floyd",
".",
"floyd_web_host",
"=",
"\"https://dev.floydhub.com\"",
"floyd",
".",
"tus_server_endpoint",
"=",
"\"https://upload-v2-dev.floydhub.com/api/v1/upload/\"",
"configure_logger",
"(",
"verbo... | Floyd CLI interacts with FloydHub server and executes your commands.
More help is available under each command listed below. | [
"Floyd",
"CLI",
"interacts",
"with",
"FloydHub",
"server",
"and",
"executes",
"your",
"commands",
".",
"More",
"help",
"is",
"available",
"under",
"each",
"command",
"listed",
"below",
"."
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/development/dev.py#L10-L18 | train | 23,206 |
floydhub/floyd-cli | floyd/client/files.py | get_unignored_file_paths | def get_unignored_file_paths(ignore_list=None, whitelist=None):
"""
Given an ignore_list and a whitelist of glob patterns, returns the list of
unignored file paths in the current directory and its subdirectories
"""
unignored_files = []
if ignore_list is None:
ignore_list = []
if whitelist is None:
whitelist = []
for root, dirs, files in os.walk("."):
floyd_logger.debug("Root:%s, Dirs:%s", root, dirs)
if ignore_path(unix_style_path(root), ignore_list, whitelist):
# Reset dirs to avoid going further down this directory.
# Then continue to the next iteration of os.walk, which causes
# everything in this directory to be ignored.
#
# Note that whitelisted files that are within directories that are
# ignored will not be whitelisted. This follows the expected
# behavior established by .gitignore logic:
# "It is not possible to re-include a file if a parent directory of
# that file is excluded."
# https://git-scm.com/docs/gitignore#_pattern_format
dirs[:] = []
floyd_logger.debug("Ignoring directory : %s", root)
continue
for file_name in files:
file_path = unix_style_path(os.path.join(root, file_name))
if ignore_path(file_path, ignore_list, whitelist):
floyd_logger.debug("Ignoring file : %s", file_name)
continue
unignored_files.append(os.path.join(root, file_name))
return unignored_files | python | def get_unignored_file_paths(ignore_list=None, whitelist=None):
"""
Given an ignore_list and a whitelist of glob patterns, returns the list of
unignored file paths in the current directory and its subdirectories
"""
unignored_files = []
if ignore_list is None:
ignore_list = []
if whitelist is None:
whitelist = []
for root, dirs, files in os.walk("."):
floyd_logger.debug("Root:%s, Dirs:%s", root, dirs)
if ignore_path(unix_style_path(root), ignore_list, whitelist):
# Reset dirs to avoid going further down this directory.
# Then continue to the next iteration of os.walk, which causes
# everything in this directory to be ignored.
#
# Note that whitelisted files that are within directories that are
# ignored will not be whitelisted. This follows the expected
# behavior established by .gitignore logic:
# "It is not possible to re-include a file if a parent directory of
# that file is excluded."
# https://git-scm.com/docs/gitignore#_pattern_format
dirs[:] = []
floyd_logger.debug("Ignoring directory : %s", root)
continue
for file_name in files:
file_path = unix_style_path(os.path.join(root, file_name))
if ignore_path(file_path, ignore_list, whitelist):
floyd_logger.debug("Ignoring file : %s", file_name)
continue
unignored_files.append(os.path.join(root, file_name))
return unignored_files | [
"def",
"get_unignored_file_paths",
"(",
"ignore_list",
"=",
"None",
",",
"whitelist",
"=",
"None",
")",
":",
"unignored_files",
"=",
"[",
"]",
"if",
"ignore_list",
"is",
"None",
":",
"ignore_list",
"=",
"[",
"]",
"if",
"whitelist",
"is",
"None",
":",
"whit... | Given an ignore_list and a whitelist of glob patterns, returns the list of
unignored file paths in the current directory and its subdirectories | [
"Given",
"an",
"ignore_list",
"and",
"a",
"whitelist",
"of",
"glob",
"patterns",
"returns",
"the",
"list",
"of",
"unignored",
"file",
"paths",
"in",
"the",
"current",
"directory",
"and",
"its",
"subdirectories"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/files.py#L22-L59 | train | 23,207 |
floydhub/floyd-cli | floyd/client/files.py | ignore_path | def ignore_path(path, ignore_list=None, whitelist=None):
"""
Returns a boolean indicating if a path should be ignored given an
ignore_list and a whitelist of glob patterns.
"""
if ignore_list is None:
return True
should_ignore = matches_glob_list(path, ignore_list)
if whitelist is None:
return should_ignore
return should_ignore and not matches_glob_list(path, whitelist) | python | def ignore_path(path, ignore_list=None, whitelist=None):
"""
Returns a boolean indicating if a path should be ignored given an
ignore_list and a whitelist of glob patterns.
"""
if ignore_list is None:
return True
should_ignore = matches_glob_list(path, ignore_list)
if whitelist is None:
return should_ignore
return should_ignore and not matches_glob_list(path, whitelist) | [
"def",
"ignore_path",
"(",
"path",
",",
"ignore_list",
"=",
"None",
",",
"whitelist",
"=",
"None",
")",
":",
"if",
"ignore_list",
"is",
"None",
":",
"return",
"True",
"should_ignore",
"=",
"matches_glob_list",
"(",
"path",
",",
"ignore_list",
")",
"if",
"w... | Returns a boolean indicating if a path should be ignored given an
ignore_list and a whitelist of glob patterns. | [
"Returns",
"a",
"boolean",
"indicating",
"if",
"a",
"path",
"should",
"be",
"ignored",
"given",
"an",
"ignore_list",
"and",
"a",
"whitelist",
"of",
"glob",
"patterns",
"."
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/files.py#L62-L74 | train | 23,208 |
floydhub/floyd-cli | floyd/client/files.py | matches_glob_list | def matches_glob_list(path, glob_list):
"""
Given a list of glob patterns, returns a boolean
indicating if a path matches any glob in the list
"""
for glob in glob_list:
try:
if PurePath(path).match(glob):
return True
except TypeError:
pass
return False | python | def matches_glob_list(path, glob_list):
"""
Given a list of glob patterns, returns a boolean
indicating if a path matches any glob in the list
"""
for glob in glob_list:
try:
if PurePath(path).match(glob):
return True
except TypeError:
pass
return False | [
"def",
"matches_glob_list",
"(",
"path",
",",
"glob_list",
")",
":",
"for",
"glob",
"in",
"glob_list",
":",
"try",
":",
"if",
"PurePath",
"(",
"path",
")",
".",
"match",
"(",
"glob",
")",
":",
"return",
"True",
"except",
"TypeError",
":",
"pass",
"retu... | Given a list of glob patterns, returns a boolean
indicating if a path matches any glob in the list | [
"Given",
"a",
"list",
"of",
"glob",
"patterns",
"returns",
"a",
"boolean",
"indicating",
"if",
"a",
"path",
"matches",
"any",
"glob",
"in",
"the",
"list"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/files.py#L77-L88 | train | 23,209 |
floydhub/floyd-cli | floyd/client/files.py | get_files_in_current_directory | def get_files_in_current_directory(file_type):
"""
Gets the list of files in the current directory and subdirectories.
Respects .floydignore file if present
"""
local_files = []
total_file_size = 0
ignore_list, whitelist = FloydIgnoreManager.get_lists()
floyd_logger.debug("Ignoring: %s", ignore_list)
floyd_logger.debug("Whitelisting: %s", whitelist)
file_paths = get_unignored_file_paths(ignore_list, whitelist)
for file_path in file_paths:
local_files.append((file_type, (unix_style_path(file_path), open(file_path, 'rb'), 'text/plain')))
total_file_size += os.path.getsize(file_path)
return (local_files, total_file_size) | python | def get_files_in_current_directory(file_type):
"""
Gets the list of files in the current directory and subdirectories.
Respects .floydignore file if present
"""
local_files = []
total_file_size = 0
ignore_list, whitelist = FloydIgnoreManager.get_lists()
floyd_logger.debug("Ignoring: %s", ignore_list)
floyd_logger.debug("Whitelisting: %s", whitelist)
file_paths = get_unignored_file_paths(ignore_list, whitelist)
for file_path in file_paths:
local_files.append((file_type, (unix_style_path(file_path), open(file_path, 'rb'), 'text/plain')))
total_file_size += os.path.getsize(file_path)
return (local_files, total_file_size) | [
"def",
"get_files_in_current_directory",
"(",
"file_type",
")",
":",
"local_files",
"=",
"[",
"]",
"total_file_size",
"=",
"0",
"ignore_list",
",",
"whitelist",
"=",
"FloydIgnoreManager",
".",
"get_lists",
"(",
")",
"floyd_logger",
".",
"debug",
"(",
"\"Ignoring: ... | Gets the list of files in the current directory and subdirectories.
Respects .floydignore file if present | [
"Gets",
"the",
"list",
"of",
"files",
"in",
"the",
"current",
"directory",
"and",
"subdirectories",
".",
"Respects",
".",
"floydignore",
"file",
"if",
"present"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/files.py#L91-L110 | train | 23,210 |
floydhub/floyd-cli | floyd/client/files.py | DataCompressor.__get_nfiles_to_compress | def __get_nfiles_to_compress(self):
"""
Return the number of files to compress
Note: it should take about 0.1s for counting 100k files on a dual core machine
"""
floyd_logger.info("Get number of files to compress... (this could take a few seconds)")
paths = [self.source_dir]
try:
# Traverse each subdirs of source_dir and count files/dirs
while paths:
path = paths.pop()
for item in scandir(path):
if item.is_dir():
paths.append(item.path)
self.__files_to_compress += 1
elif item.is_file():
self.__files_to_compress += 1
except OSError as e:
# OSError: [Errno 13] Permission denied
if e.errno == errno.EACCES:
self.source_dir = os.getcwd() if self.source_dir == '.' else self.source_dir # Expand cwd
sys.exit(("Permission denied. Make sure to have read permission "
"for all the files and directories in the path: %s")
% (self.source_dir))
floyd_logger.info("Compressing %d files", self.__files_to_compress) | python | def __get_nfiles_to_compress(self):
"""
Return the number of files to compress
Note: it should take about 0.1s for counting 100k files on a dual core machine
"""
floyd_logger.info("Get number of files to compress... (this could take a few seconds)")
paths = [self.source_dir]
try:
# Traverse each subdirs of source_dir and count files/dirs
while paths:
path = paths.pop()
for item in scandir(path):
if item.is_dir():
paths.append(item.path)
self.__files_to_compress += 1
elif item.is_file():
self.__files_to_compress += 1
except OSError as e:
# OSError: [Errno 13] Permission denied
if e.errno == errno.EACCES:
self.source_dir = os.getcwd() if self.source_dir == '.' else self.source_dir # Expand cwd
sys.exit(("Permission denied. Make sure to have read permission "
"for all the files and directories in the path: %s")
% (self.source_dir))
floyd_logger.info("Compressing %d files", self.__files_to_compress) | [
"def",
"__get_nfiles_to_compress",
"(",
"self",
")",
":",
"floyd_logger",
".",
"info",
"(",
"\"Get number of files to compress... (this could take a few seconds)\"",
")",
"paths",
"=",
"[",
"self",
".",
"source_dir",
"]",
"try",
":",
"# Traverse each subdirs of source_dir a... | Return the number of files to compress
Note: it should take about 0.1s for counting 100k files on a dual core machine | [
"Return",
"the",
"number",
"of",
"files",
"to",
"compress"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/files.py#L153-L178 | train | 23,211 |
floydhub/floyd-cli | floyd/client/files.py | DataCompressor.create_tarfile | def create_tarfile(self):
"""
Create a tar file with the contents of the current directory
"""
floyd_logger.info("Compressing data...")
# Show progress bar (file_compressed/file_to_compress)
self.__compression_bar = ProgressBar(expected_size=self.__files_to_compress, filled_char='=')
# Auxiliary functions
def dfilter_file_counter(tarinfo):
"""
Dummy filter function used to track the progression at file levels.
"""
self.__compression_bar.show(self.__files_compressed)
self.__files_compressed += 1
return tarinfo
def warn_purge_exit(info_msg, filename, progress_bar, exit_msg):
"""
Warn the user that's something went wrong,
remove the tarball and provide an exit message.
"""
progress_bar.done()
floyd_logger.info(info_msg)
rmtree(os.path.dirname(filename))
sys.exit(exit_msg)
try:
# Define the default signal handler for catching: Ctrl-C
signal.signal(signal.SIGINT, signal.default_int_handler)
with tarfile.open(self.filename, "w:gz") as tar:
tar.add(self.source_dir, arcname=os.path.basename(self.source_dir), filter=dfilter_file_counter)
self.__compression_bar.done()
except (OSError, IOError) as e:
# OSError: [Errno 13] Permission denied
if e.errno == errno.EACCES:
self.source_dir = os.getcwd() if self.source_dir == '.' else self.source_dir # Expand cwd
warn_purge_exit(info_msg="Permission denied. Removing compressed data...",
filename=self.filename,
progress_bar=self.__compression_bar,
exit_msg=("Permission denied. Make sure to have read permission "
"for all the files and directories in the path: %s")
% (self.source_dir))
# OSError: [Errno 28] No Space Left on Device (IOError on python2.7)
elif e.errno == errno.ENOSPC:
dir_path = os.path.dirname(self.filename)
warn_purge_exit(info_msg="No space left. Removing compressed data...",
filename=self.filename,
progress_bar=self.__compression_bar,
exit_msg=("No space left when compressing your data in: %s.\n"
"Make sure to have enough space before uploading your data.")
% (os.path.abspath(dir_path)))
except KeyboardInterrupt: # Purge tarball on Ctrl-C
warn_purge_exit(info_msg="Ctrl-C signal detected: Removing compressed data...",
filename=self.filename,
progress_bar=self.__compression_bar,
exit_msg="Stopped the data upload gracefully.") | python | def create_tarfile(self):
"""
Create a tar file with the contents of the current directory
"""
floyd_logger.info("Compressing data...")
# Show progress bar (file_compressed/file_to_compress)
self.__compression_bar = ProgressBar(expected_size=self.__files_to_compress, filled_char='=')
# Auxiliary functions
def dfilter_file_counter(tarinfo):
"""
Dummy filter function used to track the progression at file levels.
"""
self.__compression_bar.show(self.__files_compressed)
self.__files_compressed += 1
return tarinfo
def warn_purge_exit(info_msg, filename, progress_bar, exit_msg):
"""
Warn the user that's something went wrong,
remove the tarball and provide an exit message.
"""
progress_bar.done()
floyd_logger.info(info_msg)
rmtree(os.path.dirname(filename))
sys.exit(exit_msg)
try:
# Define the default signal handler for catching: Ctrl-C
signal.signal(signal.SIGINT, signal.default_int_handler)
with tarfile.open(self.filename, "w:gz") as tar:
tar.add(self.source_dir, arcname=os.path.basename(self.source_dir), filter=dfilter_file_counter)
self.__compression_bar.done()
except (OSError, IOError) as e:
# OSError: [Errno 13] Permission denied
if e.errno == errno.EACCES:
self.source_dir = os.getcwd() if self.source_dir == '.' else self.source_dir # Expand cwd
warn_purge_exit(info_msg="Permission denied. Removing compressed data...",
filename=self.filename,
progress_bar=self.__compression_bar,
exit_msg=("Permission denied. Make sure to have read permission "
"for all the files and directories in the path: %s")
% (self.source_dir))
# OSError: [Errno 28] No Space Left on Device (IOError on python2.7)
elif e.errno == errno.ENOSPC:
dir_path = os.path.dirname(self.filename)
warn_purge_exit(info_msg="No space left. Removing compressed data...",
filename=self.filename,
progress_bar=self.__compression_bar,
exit_msg=("No space left when compressing your data in: %s.\n"
"Make sure to have enough space before uploading your data.")
% (os.path.abspath(dir_path)))
except KeyboardInterrupt: # Purge tarball on Ctrl-C
warn_purge_exit(info_msg="Ctrl-C signal detected: Removing compressed data...",
filename=self.filename,
progress_bar=self.__compression_bar,
exit_msg="Stopped the data upload gracefully.") | [
"def",
"create_tarfile",
"(",
"self",
")",
":",
"floyd_logger",
".",
"info",
"(",
"\"Compressing data...\"",
")",
"# Show progress bar (file_compressed/file_to_compress)",
"self",
".",
"__compression_bar",
"=",
"ProgressBar",
"(",
"expected_size",
"=",
"self",
".",
"__f... | Create a tar file with the contents of the current directory | [
"Create",
"a",
"tar",
"file",
"with",
"the",
"contents",
"of",
"the",
"current",
"directory"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/files.py#L180-L237 | train | 23,212 |
floydhub/floyd-cli | floyd/client/data.py | DataClient.create | def create(self, data):
"""
Create a temporary directory for the tar file that will be removed at
the end of the operation.
"""
try:
floyd_logger.info("Making create request to server...")
post_body = data.to_dict()
post_body["resumable"] = True
response = self.request("POST", self.url, json=post_body)
return response.json()
except BadRequestException as e:
if 'Dataset not found, ID' in e.message:
floyd_logger.error(
'Data create: ERROR! Please run "floyd data init DATASET_NAME" before upload.')
else:
floyd_logger.error('Data create: ERROR! %s', e.message)
return None
except FloydException as e:
floyd_logger.error("Data create: ERROR! %s", e.message)
return None | python | def create(self, data):
"""
Create a temporary directory for the tar file that will be removed at
the end of the operation.
"""
try:
floyd_logger.info("Making create request to server...")
post_body = data.to_dict()
post_body["resumable"] = True
response = self.request("POST", self.url, json=post_body)
return response.json()
except BadRequestException as e:
if 'Dataset not found, ID' in e.message:
floyd_logger.error(
'Data create: ERROR! Please run "floyd data init DATASET_NAME" before upload.')
else:
floyd_logger.error('Data create: ERROR! %s', e.message)
return None
except FloydException as e:
floyd_logger.error("Data create: ERROR! %s", e.message)
return None | [
"def",
"create",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"floyd_logger",
".",
"info",
"(",
"\"Making create request to server...\"",
")",
"post_body",
"=",
"data",
".",
"to_dict",
"(",
")",
"post_body",
"[",
"\"resumable\"",
"]",
"=",
"True",
"respon... | Create a temporary directory for the tar file that will be removed at
the end of the operation. | [
"Create",
"a",
"temporary",
"directory",
"for",
"the",
"tar",
"file",
"that",
"will",
"be",
"removed",
"at",
"the",
"end",
"of",
"the",
"operation",
"."
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/client/data.py#L27-L47 | train | 23,213 |
floydhub/floyd-cli | floyd/cli/run.py | get_command_line | def get_command_line(instance_type, env, message, data, mode, open_notebook, command_str):
"""
Return a string representing the full floyd command entered in the command line
"""
floyd_command = ["floyd", "run"]
if instance_type:
floyd_command.append('--' + INSTANCE_NAME_MAP[instance_type])
if env and not env == DEFAULT_ENV:
floyd_command += ["--env", env]
if message:
floyd_command += ["--message", shell_quote(message)]
if data:
for data_item in data:
parts = data_item.split(':')
if len(parts) > 1:
data_item = normalize_data_name(parts[0], use_data_config=False) + ':' + parts[1]
floyd_command += ["--data", data_item]
if mode and mode != "job":
floyd_command += ["--mode", mode]
if mode == 'jupyter':
if not open_notebook:
floyd_command.append("--no-open")
else:
if command_str:
floyd_command.append(shell_quote(command_str))
return ' '.join(floyd_command) | python | def get_command_line(instance_type, env, message, data, mode, open_notebook, command_str):
"""
Return a string representing the full floyd command entered in the command line
"""
floyd_command = ["floyd", "run"]
if instance_type:
floyd_command.append('--' + INSTANCE_NAME_MAP[instance_type])
if env and not env == DEFAULT_ENV:
floyd_command += ["--env", env]
if message:
floyd_command += ["--message", shell_quote(message)]
if data:
for data_item in data:
parts = data_item.split(':')
if len(parts) > 1:
data_item = normalize_data_name(parts[0], use_data_config=False) + ':' + parts[1]
floyd_command += ["--data", data_item]
if mode and mode != "job":
floyd_command += ["--mode", mode]
if mode == 'jupyter':
if not open_notebook:
floyd_command.append("--no-open")
else:
if command_str:
floyd_command.append(shell_quote(command_str))
return ' '.join(floyd_command) | [
"def",
"get_command_line",
"(",
"instance_type",
",",
"env",
",",
"message",
",",
"data",
",",
"mode",
",",
"open_notebook",
",",
"command_str",
")",
":",
"floyd_command",
"=",
"[",
"\"floyd\"",
",",
"\"run\"",
"]",
"if",
"instance_type",
":",
"floyd_command",... | Return a string representing the full floyd command entered in the command line | [
"Return",
"a",
"string",
"representing",
"the",
"full",
"floyd",
"command",
"entered",
"in",
"the",
"command",
"line"
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/cli/run.py#L353-L380 | train | 23,214 |
floydhub/floyd-cli | floyd/cli/run.py | restart | def restart(ctx, job_name, data, open_notebook, env, message, gpu, cpu, gpup, cpup, command):
"""
Restart a finished job as a new job.
"""
# Error early if more than one --env is passed. Then get the first/only
# --env out of the list so all other operations work normally (they don't
# expect an iterable). For details on this approach, see the comment above
# the --env click option
if len(env) > 1:
floyd_logger.error(
"You passed more than one environment: {}. Please specify a single environment.".format(env)
)
sys.exit(1)
env = env[0]
parameters = {}
expt_client = ExperimentClient()
try:
job = expt_client.get(normalize_job_name(job_name))
except FloydException:
job = expt_client.get(job_name)
if gpu:
instance_type = G1_INSTANCE_TYPE
elif cpu:
instance_type = C1_INSTANCE_TYPE
else:
instance_type = job.instance_type
if instance_type is not None:
parameters['instance_type'] = instance_type
else:
instance_type = job.instance_type
if env is not None:
arch = INSTANCE_ARCH_MAP[instance_type]
if not validate_env(env, arch):
sys.exit(1)
parameters['env'] = env
success, data_ids, show_data_info = process_data_ids(data)
if not success:
sys.exit(1)
if data_ids:
parameters['data_ids'] = data_ids
if message:
parameters['description'] = message
if command:
parameters['command'] = ' '.join(command)
floyd_logger.info('Restarting job %s...', job_name)
new_job_info = expt_client.restart(job.id, parameters=parameters)
if not new_job_info:
floyd_logger.error("Failed to restart job")
sys.exit(1)
show_new_job_info(expt_client, new_job_info['name'], new_job_info, job.mode, open_notebook, show_data_info) | python | def restart(ctx, job_name, data, open_notebook, env, message, gpu, cpu, gpup, cpup, command):
"""
Restart a finished job as a new job.
"""
# Error early if more than one --env is passed. Then get the first/only
# --env out of the list so all other operations work normally (they don't
# expect an iterable). For details on this approach, see the comment above
# the --env click option
if len(env) > 1:
floyd_logger.error(
"You passed more than one environment: {}. Please specify a single environment.".format(env)
)
sys.exit(1)
env = env[0]
parameters = {}
expt_client = ExperimentClient()
try:
job = expt_client.get(normalize_job_name(job_name))
except FloydException:
job = expt_client.get(job_name)
if gpu:
instance_type = G1_INSTANCE_TYPE
elif cpu:
instance_type = C1_INSTANCE_TYPE
else:
instance_type = job.instance_type
if instance_type is not None:
parameters['instance_type'] = instance_type
else:
instance_type = job.instance_type
if env is not None:
arch = INSTANCE_ARCH_MAP[instance_type]
if not validate_env(env, arch):
sys.exit(1)
parameters['env'] = env
success, data_ids, show_data_info = process_data_ids(data)
if not success:
sys.exit(1)
if data_ids:
parameters['data_ids'] = data_ids
if message:
parameters['description'] = message
if command:
parameters['command'] = ' '.join(command)
floyd_logger.info('Restarting job %s...', job_name)
new_job_info = expt_client.restart(job.id, parameters=parameters)
if not new_job_info:
floyd_logger.error("Failed to restart job")
sys.exit(1)
show_new_job_info(expt_client, new_job_info['name'], new_job_info, job.mode, open_notebook, show_data_info) | [
"def",
"restart",
"(",
"ctx",
",",
"job_name",
",",
"data",
",",
"open_notebook",
",",
"env",
",",
"message",
",",
"gpu",
",",
"cpu",
",",
"gpup",
",",
"cpup",
",",
"command",
")",
":",
"# Error early if more than one --env is passed. Then get the first/only",
"... | Restart a finished job as a new job. | [
"Restart",
"a",
"finished",
"job",
"as",
"a",
"new",
"job",
"."
] | ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c | https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/cli/run.py#L405-L466 | train | 23,215 |
yvesalexandre/bandicoot | bandicoot/helper/group.py | filter_user | def filter_user(user, using='records', interaction=None,
part_of_week='allweek', part_of_day='allday'):
"""
Filter records of a User objects by interaction, part of week and day.
Parameters
----------
user : User
a bandicoot User object
type : str, default 'records'
'records' or 'recharges'
part_of_week : {'allweek', 'weekday', 'weekend'}, default 'allweek'
* 'weekend': keep only the weekend records
* 'weekday': keep only the weekdays records
* 'allweek': use all the records
part_of_day : {'allday', 'day', 'night'}, default 'allday'
* 'day': keep only the records during the day
* 'night': keep only the records during the night
* 'allday': use all the records
interaction : object
The interaction to filter records:
* "callandtext", for only callandtext;
* a string, to filter for one type;
* None, to use all records.
"""
if using == 'recharges':
records = user.recharges
else:
records = user.records
if interaction == 'callandtext':
records = filter(
lambda r: r.interaction in ['call', 'text'], records)
elif interaction is not None:
records = filter(lambda r: r.interaction == interaction, records)
if part_of_week == 'weekday':
records = filter(
lambda r: r.datetime.isoweekday() not in user.weekend, records)
elif part_of_week == 'weekend':
records = filter(
lambda r: r.datetime.isoweekday() in user.weekend, records)
elif part_of_week != 'allweek':
raise KeyError(
"{} is not a valid value for part_of_week. it should be 'weekday', "
"'weekend' or 'allweek'.".format(part_of_week))
if user.night_start < user.night_end:
night_filter = lambda r: user.night_end > r.datetime.time(
) > user.night_start
else:
night_filter = lambda r: not(
user.night_end < r.datetime.time() < user.night_start)
if part_of_day == 'day':
records = filter(lambda r: not(night_filter(r)), records)
elif part_of_day == 'night':
records = filter(night_filter, records)
elif part_of_day != 'allday':
raise KeyError(
"{} is not a valid value for part_of_day. It should be 'day', 'night' or 'allday'.".format(part_of_day))
return list(records) | python | def filter_user(user, using='records', interaction=None,
part_of_week='allweek', part_of_day='allday'):
"""
Filter records of a User objects by interaction, part of week and day.
Parameters
----------
user : User
a bandicoot User object
type : str, default 'records'
'records' or 'recharges'
part_of_week : {'allweek', 'weekday', 'weekend'}, default 'allweek'
* 'weekend': keep only the weekend records
* 'weekday': keep only the weekdays records
* 'allweek': use all the records
part_of_day : {'allday', 'day', 'night'}, default 'allday'
* 'day': keep only the records during the day
* 'night': keep only the records during the night
* 'allday': use all the records
interaction : object
The interaction to filter records:
* "callandtext", for only callandtext;
* a string, to filter for one type;
* None, to use all records.
"""
if using == 'recharges':
records = user.recharges
else:
records = user.records
if interaction == 'callandtext':
records = filter(
lambda r: r.interaction in ['call', 'text'], records)
elif interaction is not None:
records = filter(lambda r: r.interaction == interaction, records)
if part_of_week == 'weekday':
records = filter(
lambda r: r.datetime.isoweekday() not in user.weekend, records)
elif part_of_week == 'weekend':
records = filter(
lambda r: r.datetime.isoweekday() in user.weekend, records)
elif part_of_week != 'allweek':
raise KeyError(
"{} is not a valid value for part_of_week. it should be 'weekday', "
"'weekend' or 'allweek'.".format(part_of_week))
if user.night_start < user.night_end:
night_filter = lambda r: user.night_end > r.datetime.time(
) > user.night_start
else:
night_filter = lambda r: not(
user.night_end < r.datetime.time() < user.night_start)
if part_of_day == 'day':
records = filter(lambda r: not(night_filter(r)), records)
elif part_of_day == 'night':
records = filter(night_filter, records)
elif part_of_day != 'allday':
raise KeyError(
"{} is not a valid value for part_of_day. It should be 'day', 'night' or 'allday'.".format(part_of_day))
return list(records) | [
"def",
"filter_user",
"(",
"user",
",",
"using",
"=",
"'records'",
",",
"interaction",
"=",
"None",
",",
"part_of_week",
"=",
"'allweek'",
",",
"part_of_day",
"=",
"'allday'",
")",
":",
"if",
"using",
"==",
"'recharges'",
":",
"records",
"=",
"user",
".",
... | Filter records of a User objects by interaction, part of week and day.
Parameters
----------
user : User
a bandicoot User object
type : str, default 'records'
'records' or 'recharges'
part_of_week : {'allweek', 'weekday', 'weekend'}, default 'allweek'
* 'weekend': keep only the weekend records
* 'weekday': keep only the weekdays records
* 'allweek': use all the records
part_of_day : {'allday', 'day', 'night'}, default 'allday'
* 'day': keep only the records during the day
* 'night': keep only the records during the night
* 'allday': use all the records
interaction : object
The interaction to filter records:
* "callandtext", for only callandtext;
* a string, to filter for one type;
* None, to use all records. | [
"Filter",
"records",
"of",
"a",
"User",
"objects",
"by",
"interaction",
"part",
"of",
"week",
"and",
"day",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/group.py#L43-L105 | train | 23,216 |
yvesalexandre/bandicoot | bandicoot/helper/group.py | positions_binning | def positions_binning(records):
"""
Bin records by chunks of 30 minutes, returning the most prevalent position.
If multiple positions have the same number of occurrences
(during 30 minutes), we select the last one.
"""
def get_key(d):
return (d.year, d.day, d.hour, d.minute // 30)
chunks = itertools.groupby(records, key=lambda r: get_key(r.datetime))
for _, items in chunks:
positions = [i.position for i in items]
# Given the low number of positions per chunk of 30 minutes, and
# the need for a deterministic value, we use max and not Counter
yield max(positions, key=positions.count) | python | def positions_binning(records):
"""
Bin records by chunks of 30 minutes, returning the most prevalent position.
If multiple positions have the same number of occurrences
(during 30 minutes), we select the last one.
"""
def get_key(d):
return (d.year, d.day, d.hour, d.minute // 30)
chunks = itertools.groupby(records, key=lambda r: get_key(r.datetime))
for _, items in chunks:
positions = [i.position for i in items]
# Given the low number of positions per chunk of 30 minutes, and
# the need for a deterministic value, we use max and not Counter
yield max(positions, key=positions.count) | [
"def",
"positions_binning",
"(",
"records",
")",
":",
"def",
"get_key",
"(",
"d",
")",
":",
"return",
"(",
"d",
".",
"year",
",",
"d",
".",
"day",
",",
"d",
".",
"hour",
",",
"d",
".",
"minute",
"//",
"30",
")",
"chunks",
"=",
"itertools",
".",
... | Bin records by chunks of 30 minutes, returning the most prevalent position.
If multiple positions have the same number of occurrences
(during 30 minutes), we select the last one. | [
"Bin",
"records",
"by",
"chunks",
"of",
"30",
"minutes",
"returning",
"the",
"most",
"prevalent",
"position",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/group.py#L108-L124 | train | 23,217 |
yvesalexandre/bandicoot | bandicoot/helper/group.py | _group_range | def _group_range(records, method):
"""
Yield the range of all dates between the extrema of
a list of records, separated by a given time delta.
"""
start_date = records[0].datetime
end_date = records[-1].datetime
_fun = DATE_GROUPERS[method]
d = start_date
# Day and week use timedelta
if method not in ["month", "year"]:
def increment(i):
return i + timedelta(**{method + 's': 1})
elif method == "month":
def increment(i):
year, month = divmod(i.month + 1, 12)
if month == 0:
month = 12
year = year - 1
return d.replace(year=d.year + year, month=month)
elif method == "year":
def increment(i):
return d.replace(year=d.year + 1)
while _fun(d) <= _fun(end_date):
yield d
d = increment(d) | python | def _group_range(records, method):
"""
Yield the range of all dates between the extrema of
a list of records, separated by a given time delta.
"""
start_date = records[0].datetime
end_date = records[-1].datetime
_fun = DATE_GROUPERS[method]
d = start_date
# Day and week use timedelta
if method not in ["month", "year"]:
def increment(i):
return i + timedelta(**{method + 's': 1})
elif method == "month":
def increment(i):
year, month = divmod(i.month + 1, 12)
if month == 0:
month = 12
year = year - 1
return d.replace(year=d.year + year, month=month)
elif method == "year":
def increment(i):
return d.replace(year=d.year + 1)
while _fun(d) <= _fun(end_date):
yield d
d = increment(d) | [
"def",
"_group_range",
"(",
"records",
",",
"method",
")",
":",
"start_date",
"=",
"records",
"[",
"0",
"]",
".",
"datetime",
"end_date",
"=",
"records",
"[",
"-",
"1",
"]",
".",
"datetime",
"_fun",
"=",
"DATE_GROUPERS",
"[",
"method",
"]",
"d",
"=",
... | Yield the range of all dates between the extrema of
a list of records, separated by a given time delta. | [
"Yield",
"the",
"range",
"of",
"all",
"dates",
"between",
"the",
"extrema",
"of",
"a",
"list",
"of",
"records",
"separated",
"by",
"a",
"given",
"time",
"delta",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/group.py#L127-L158 | train | 23,218 |
yvesalexandre/bandicoot | bandicoot/helper/group.py | group_records | def group_records(records, groupby='week'):
"""
Group records by year, month, week, or day.
Parameters
----------
records : iterator
An iterator over records
groupby : Default is 'week':
* 'week': group all records by year and week
* None: records are not grouped. This is useful if you don't want to
divide records in chunks
* "day", "month", and "year" also accepted
"""
def _group_date(records, _fun):
for _, chunk in itertools.groupby(records, key=lambda r: _fun(r.datetime)):
yield list(chunk)
return _group_date(records, DATE_GROUPERS[groupby]) | python | def group_records(records, groupby='week'):
"""
Group records by year, month, week, or day.
Parameters
----------
records : iterator
An iterator over records
groupby : Default is 'week':
* 'week': group all records by year and week
* None: records are not grouped. This is useful if you don't want to
divide records in chunks
* "day", "month", and "year" also accepted
"""
def _group_date(records, _fun):
for _, chunk in itertools.groupby(records, key=lambda r: _fun(r.datetime)):
yield list(chunk)
return _group_date(records, DATE_GROUPERS[groupby]) | [
"def",
"group_records",
"(",
"records",
",",
"groupby",
"=",
"'week'",
")",
":",
"def",
"_group_date",
"(",
"records",
",",
"_fun",
")",
":",
"for",
"_",
",",
"chunk",
"in",
"itertools",
".",
"groupby",
"(",
"records",
",",
"key",
"=",
"lambda",
"r",
... | Group records by year, month, week, or day.
Parameters
----------
records : iterator
An iterator over records
groupby : Default is 'week':
* 'week': group all records by year and week
* None: records are not grouped. This is useful if you don't want to
divide records in chunks
* "day", "month", and "year" also accepted | [
"Group",
"records",
"by",
"year",
"month",
"week",
"or",
"day",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/group.py#L186-L206 | train | 23,219 |
yvesalexandre/bandicoot | bandicoot/helper/group.py | infer_type | def infer_type(data):
"""
Infer the type of objects returned by indicators.
infer_type returns:
- 'scalar' for a number or None,
- 'summarystats' for a SummaryStats object,
- 'distribution_scalar' for a list of scalars,
- 'distribution_summarystats' for a list of SummaryStats objects
"""
if isinstance(data, (type(None), numbers.Number)):
return 'scalar'
if isinstance(data, SummaryStats):
return 'summarystats'
if hasattr(data, "__len__"): # list or numpy array
data = [x for x in data if x is not None]
if len(data) == 0 or isinstance(data[0], numbers.Number):
return 'distribution_scalar'
if isinstance(data[0], SummaryStats):
return 'distribution_summarystats'
raise TypeError(
"{} is not a valid input. It should be a number, a SummaryStats "
"object, or None".format(data[0]))
raise TypeError(
"{} is not a valid input. It should be a number, a SummaryStats "
"object, or a list".format(data)) | python | def infer_type(data):
"""
Infer the type of objects returned by indicators.
infer_type returns:
- 'scalar' for a number or None,
- 'summarystats' for a SummaryStats object,
- 'distribution_scalar' for a list of scalars,
- 'distribution_summarystats' for a list of SummaryStats objects
"""
if isinstance(data, (type(None), numbers.Number)):
return 'scalar'
if isinstance(data, SummaryStats):
return 'summarystats'
if hasattr(data, "__len__"): # list or numpy array
data = [x for x in data if x is not None]
if len(data) == 0 or isinstance(data[0], numbers.Number):
return 'distribution_scalar'
if isinstance(data[0], SummaryStats):
return 'distribution_summarystats'
raise TypeError(
"{} is not a valid input. It should be a number, a SummaryStats "
"object, or None".format(data[0]))
raise TypeError(
"{} is not a valid input. It should be a number, a SummaryStats "
"object, or a list".format(data)) | [
"def",
"infer_type",
"(",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"(",
"type",
"(",
"None",
")",
",",
"numbers",
".",
"Number",
")",
")",
":",
"return",
"'scalar'",
"if",
"isinstance",
"(",
"data",
",",
"SummaryStats",
")",
":",
"retu... | Infer the type of objects returned by indicators.
infer_type returns:
- 'scalar' for a number or None,
- 'summarystats' for a SummaryStats object,
- 'distribution_scalar' for a list of scalars,
- 'distribution_summarystats' for a list of SummaryStats objects | [
"Infer",
"the",
"type",
"of",
"objects",
"returned",
"by",
"indicators",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/group.py#L209-L239 | train | 23,220 |
yvesalexandre/bandicoot | bandicoot/helper/group.py | grouping | def grouping(f=None, interaction=['call', 'text'], summary='default',
user_kwd=False):
"""
``grouping`` is a decorator for indicator functions, used to simplify the
source code.
Parameters
----------
f : function
The function to decorate
user_kwd : boolean
If user_kwd is True, the user object will be passed to the decorated
function
interaction : 'call', 'text', 'location', or a list
By default, all indicators use only 'call' and 'text' records, but the
interaction keywords filters the records passed to the function.
summary: 'default', 'extended', None
An indicator returns data statistics, ether *mean* and *std* by
default, more with 'extended', or the inner distribution with None.
See :meth:`~bandicoot.helper.group.statistics` for more details.
See :ref:`new-indicator-label` to learn how to write an indicator with
this decorator.
"""
if f is None:
return partial(grouping, user_kwd=user_kwd, interaction=interaction,
summary=summary)
def wrapper(user, groupby='week', interaction=interaction, summary=summary,
split_week=False, split_day=False, filter_empty=True,
datatype=None, **kwargs):
if interaction is None:
interaction = ['call', 'text']
parameters = divide_parameters(split_week, split_day, interaction)
operations = {
'grouping': {
'using': 'records',
'binning': False,
'groupby': groupby,
'filter_empty': filter_empty,
'divide_by': parameters
}, 'apply': {
'user_kwd': user_kwd,
'summary': summary,
'kwargs': kwargs
}
}
for i in parameters['interaction']:
if i not in ['callandtext', 'call', 'text', 'location']:
raise ValueError("%s is not a valid interaction value. Only "
"'call', 'text', and 'location' are accepted."
% i)
return _generic_wrapper(f, user, operations, datatype)
return advanced_wrap(f, wrapper) | python | def grouping(f=None, interaction=['call', 'text'], summary='default',
user_kwd=False):
"""
``grouping`` is a decorator for indicator functions, used to simplify the
source code.
Parameters
----------
f : function
The function to decorate
user_kwd : boolean
If user_kwd is True, the user object will be passed to the decorated
function
interaction : 'call', 'text', 'location', or a list
By default, all indicators use only 'call' and 'text' records, but the
interaction keywords filters the records passed to the function.
summary: 'default', 'extended', None
An indicator returns data statistics, ether *mean* and *std* by
default, more with 'extended', or the inner distribution with None.
See :meth:`~bandicoot.helper.group.statistics` for more details.
See :ref:`new-indicator-label` to learn how to write an indicator with
this decorator.
"""
if f is None:
return partial(grouping, user_kwd=user_kwd, interaction=interaction,
summary=summary)
def wrapper(user, groupby='week', interaction=interaction, summary=summary,
split_week=False, split_day=False, filter_empty=True,
datatype=None, **kwargs):
if interaction is None:
interaction = ['call', 'text']
parameters = divide_parameters(split_week, split_day, interaction)
operations = {
'grouping': {
'using': 'records',
'binning': False,
'groupby': groupby,
'filter_empty': filter_empty,
'divide_by': parameters
}, 'apply': {
'user_kwd': user_kwd,
'summary': summary,
'kwargs': kwargs
}
}
for i in parameters['interaction']:
if i not in ['callandtext', 'call', 'text', 'location']:
raise ValueError("%s is not a valid interaction value. Only "
"'call', 'text', and 'location' are accepted."
% i)
return _generic_wrapper(f, user, operations, datatype)
return advanced_wrap(f, wrapper) | [
"def",
"grouping",
"(",
"f",
"=",
"None",
",",
"interaction",
"=",
"[",
"'call'",
",",
"'text'",
"]",
",",
"summary",
"=",
"'default'",
",",
"user_kwd",
"=",
"False",
")",
":",
"if",
"f",
"is",
"None",
":",
"return",
"partial",
"(",
"grouping",
",",
... | ``grouping`` is a decorator for indicator functions, used to simplify the
source code.
Parameters
----------
f : function
The function to decorate
user_kwd : boolean
If user_kwd is True, the user object will be passed to the decorated
function
interaction : 'call', 'text', 'location', or a list
By default, all indicators use only 'call' and 'text' records, but the
interaction keywords filters the records passed to the function.
summary: 'default', 'extended', None
An indicator returns data statistics, ether *mean* and *std* by
default, more with 'extended', or the inner distribution with None.
See :meth:`~bandicoot.helper.group.statistics` for more details.
See :ref:`new-indicator-label` to learn how to write an indicator with
this decorator. | [
"grouping",
"is",
"a",
"decorator",
"for",
"indicator",
"functions",
"used",
"to",
"simplify",
"the",
"source",
"code",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/group.py#L396-L456 | train | 23,221 |
yvesalexandre/bandicoot | bandicoot/helper/maths.py | kurtosis | def kurtosis(data):
"""
Return the kurtosis for ``data``.
"""
if len(data) == 0:
return None
num = moment(data, 4)
denom = moment(data, 2) ** 2.
return num / denom if denom != 0 else 0 | python | def kurtosis(data):
"""
Return the kurtosis for ``data``.
"""
if len(data) == 0:
return None
num = moment(data, 4)
denom = moment(data, 2) ** 2.
return num / denom if denom != 0 else 0 | [
"def",
"kurtosis",
"(",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"None",
"num",
"=",
"moment",
"(",
"data",
",",
"4",
")",
"denom",
"=",
"moment",
"(",
"data",
",",
"2",
")",
"**",
"2.",
"return",
"num",
"/",
... | Return the kurtosis for ``data``. | [
"Return",
"the",
"kurtosis",
"for",
"data",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/maths.py#L44-L55 | train | 23,222 |
yvesalexandre/bandicoot | bandicoot/helper/maths.py | skewness | def skewness(data):
"""
Returns the skewness of ``data``.
"""
if len(data) == 0:
return None
num = moment(data, 3)
denom = moment(data, 2) ** 1.5
return num / denom if denom != 0 else 0. | python | def skewness(data):
"""
Returns the skewness of ``data``.
"""
if len(data) == 0:
return None
num = moment(data, 3)
denom = moment(data, 2) ** 1.5
return num / denom if denom != 0 else 0. | [
"def",
"skewness",
"(",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"None",
"num",
"=",
"moment",
"(",
"data",
",",
"3",
")",
"denom",
"=",
"moment",
"(",
"data",
",",
"2",
")",
"**",
"1.5",
"return",
"num",
"/",
... | Returns the skewness of ``data``. | [
"Returns",
"the",
"skewness",
"of",
"data",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/maths.py#L58-L69 | train | 23,223 |
yvesalexandre/bandicoot | bandicoot/helper/maths.py | median | def median(data):
"""
Return the median of numeric data, unsing the "mean of middle two" method.
If ``data`` is empty, ``0`` is returned.
Examples
--------
>>> median([1, 3, 5])
3.0
When the number of data points is even, the median is interpolated:
>>> median([1, 3, 5, 7])
4.0
"""
if len(data) == 0:
return None
data = sorted(data)
return float((data[len(data) // 2] + data[(len(data) - 1) // 2]) / 2.) | python | def median(data):
"""
Return the median of numeric data, unsing the "mean of middle two" method.
If ``data`` is empty, ``0`` is returned.
Examples
--------
>>> median([1, 3, 5])
3.0
When the number of data points is even, the median is interpolated:
>>> median([1, 3, 5, 7])
4.0
"""
if len(data) == 0:
return None
data = sorted(data)
return float((data[len(data) // 2] + data[(len(data) - 1) // 2]) / 2.) | [
"def",
"median",
"(",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"None",
"data",
"=",
"sorted",
"(",
"data",
")",
"return",
"float",
"(",
"(",
"data",
"[",
"len",
"(",
"data",
")",
"//",
"2",
"]",
"+",
"data",
... | Return the median of numeric data, unsing the "mean of middle two" method.
If ``data`` is empty, ``0`` is returned.
Examples
--------
>>> median([1, 3, 5])
3.0
When the number of data points is even, the median is interpolated:
>>> median([1, 3, 5, 7])
4.0 | [
"Return",
"the",
"median",
"of",
"numeric",
"data",
"unsing",
"the",
"mean",
"of",
"middle",
"two",
"method",
".",
"If",
"data",
"is",
"empty",
"0",
"is",
"returned",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/maths.py#L88-L108 | train | 23,224 |
yvesalexandre/bandicoot | bandicoot/helper/maths.py | entropy | def entropy(data):
"""
Compute the Shannon entropy, a measure of uncertainty.
"""
if len(data) == 0:
return None
n = sum(data)
_op = lambda f: f * math.log(f)
return - sum(_op(float(i) / n) for i in data) | python | def entropy(data):
"""
Compute the Shannon entropy, a measure of uncertainty.
"""
if len(data) == 0:
return None
n = sum(data)
_op = lambda f: f * math.log(f)
return - sum(_op(float(i) / n) for i in data) | [
"def",
"entropy",
"(",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"None",
"n",
"=",
"sum",
"(",
"data",
")",
"_op",
"=",
"lambda",
"f",
":",
"f",
"*",
"math",
".",
"log",
"(",
"f",
")",
"return",
"-",
"sum",
... | Compute the Shannon entropy, a measure of uncertainty. | [
"Compute",
"the",
"Shannon",
"entropy",
"a",
"measure",
"of",
"uncertainty",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/maths.py#L206-L217 | train | 23,225 |
yvesalexandre/bandicoot | bandicoot/helper/tools.py | advanced_wrap | def advanced_wrap(f, wrapper):
"""
Wrap a decorated function while keeping the same keyword arguments
"""
f_sig = list(inspect.getargspec(f))
wrap_sig = list(inspect.getargspec(wrapper))
# Update the keyword arguments of the wrapper
if f_sig[3] is None or f_sig[3] == []:
f_sig[3], f_kwargs = [], []
else:
f_kwargs = f_sig[0][-len(f_sig[3]):]
for key, default in zip(f_kwargs, f_sig[3]):
wrap_sig[0].append(key)
wrap_sig[3] = wrap_sig[3] + (default, )
wrap_sig[2] = None # Remove kwargs
src = "lambda %s: " % (inspect.formatargspec(*wrap_sig)[1:-1])
new_args = inspect.formatargspec(
wrap_sig[0], wrap_sig[1], wrap_sig[2], f_kwargs,
formatvalue=lambda x: '=' + x)
src += 'wrapper%s\n' % new_args
decorated = eval(src, locals())
decorated.func = f
return update_wrapper(decorated, f) | python | def advanced_wrap(f, wrapper):
"""
Wrap a decorated function while keeping the same keyword arguments
"""
f_sig = list(inspect.getargspec(f))
wrap_sig = list(inspect.getargspec(wrapper))
# Update the keyword arguments of the wrapper
if f_sig[3] is None or f_sig[3] == []:
f_sig[3], f_kwargs = [], []
else:
f_kwargs = f_sig[0][-len(f_sig[3]):]
for key, default in zip(f_kwargs, f_sig[3]):
wrap_sig[0].append(key)
wrap_sig[3] = wrap_sig[3] + (default, )
wrap_sig[2] = None # Remove kwargs
src = "lambda %s: " % (inspect.formatargspec(*wrap_sig)[1:-1])
new_args = inspect.formatargspec(
wrap_sig[0], wrap_sig[1], wrap_sig[2], f_kwargs,
formatvalue=lambda x: '=' + x)
src += 'wrapper%s\n' % new_args
decorated = eval(src, locals())
decorated.func = f
return update_wrapper(decorated, f) | [
"def",
"advanced_wrap",
"(",
"f",
",",
"wrapper",
")",
":",
"f_sig",
"=",
"list",
"(",
"inspect",
".",
"getargspec",
"(",
"f",
")",
")",
"wrap_sig",
"=",
"list",
"(",
"inspect",
".",
"getargspec",
"(",
"wrapper",
")",
")",
"# Update the keyword arguments o... | Wrap a decorated function while keeping the same keyword arguments | [
"Wrap",
"a",
"decorated",
"function",
"while",
"keeping",
"the",
"same",
"keyword",
"arguments"
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/tools.py#L67-L93 | train | 23,226 |
yvesalexandre/bandicoot | bandicoot/helper/tools.py | percent_records_missing_location | def percent_records_missing_location(user, method=None):
"""
Return the percentage of records missing a location parameter.
"""
if len(user.records) == 0:
return 0.
missing_locations = sum([1 for record in user.records if record.position._get_location(user) is None])
return float(missing_locations) / len(user.records) | python | def percent_records_missing_location(user, method=None):
"""
Return the percentage of records missing a location parameter.
"""
if len(user.records) == 0:
return 0.
missing_locations = sum([1 for record in user.records if record.position._get_location(user) is None])
return float(missing_locations) / len(user.records) | [
"def",
"percent_records_missing_location",
"(",
"user",
",",
"method",
"=",
"None",
")",
":",
"if",
"len",
"(",
"user",
".",
"records",
")",
"==",
"0",
":",
"return",
"0.",
"missing_locations",
"=",
"sum",
"(",
"[",
"1",
"for",
"record",
"in",
"user",
... | Return the percentage of records missing a location parameter. | [
"Return",
"the",
"percentage",
"of",
"records",
"missing",
"a",
"location",
"parameter",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/tools.py#L198-L207 | train | 23,227 |
yvesalexandre/bandicoot | bandicoot/helper/tools.py | percent_overlapping_calls | def percent_overlapping_calls(records, min_gab=300):
"""
Return the percentage of calls that overlap with the next call.
Parameters
----------
records : list
The records for a single user.
min_gab : int
Number of seconds that the calls must overlap to be considered an issue.
Defaults to 5 minutes.
"""
calls = [r for r in records if r.interaction == "call"]
if len(calls) == 0:
return 0.
overlapping_calls = 0
for i, r in enumerate(calls):
if i <= len(calls) - 2:
if r.datetime + timedelta(seconds=r.call_duration - min_gab) >= calls[i + 1].datetime:
overlapping_calls += 1
return (float(overlapping_calls) / len(calls)) | python | def percent_overlapping_calls(records, min_gab=300):
"""
Return the percentage of calls that overlap with the next call.
Parameters
----------
records : list
The records for a single user.
min_gab : int
Number of seconds that the calls must overlap to be considered an issue.
Defaults to 5 minutes.
"""
calls = [r for r in records if r.interaction == "call"]
if len(calls) == 0:
return 0.
overlapping_calls = 0
for i, r in enumerate(calls):
if i <= len(calls) - 2:
if r.datetime + timedelta(seconds=r.call_duration - min_gab) >= calls[i + 1].datetime:
overlapping_calls += 1
return (float(overlapping_calls) / len(calls)) | [
"def",
"percent_overlapping_calls",
"(",
"records",
",",
"min_gab",
"=",
"300",
")",
":",
"calls",
"=",
"[",
"r",
"for",
"r",
"in",
"records",
"if",
"r",
".",
"interaction",
"==",
"\"call\"",
"]",
"if",
"len",
"(",
"calls",
")",
"==",
"0",
":",
"retu... | Return the percentage of calls that overlap with the next call.
Parameters
----------
records : list
The records for a single user.
min_gab : int
Number of seconds that the calls must overlap to be considered an issue.
Defaults to 5 minutes. | [
"Return",
"the",
"percentage",
"of",
"calls",
"that",
"overlap",
"with",
"the",
"next",
"call",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/tools.py#L210-L234 | train | 23,228 |
yvesalexandre/bandicoot | bandicoot/helper/tools.py | antennas_missing_locations | def antennas_missing_locations(user, Method=None):
"""
Return the number of antennas missing locations in the records of a given user.
"""
unique_antennas = set([record.position.antenna for record in user.records
if record.position.antenna is not None])
return sum([1 for antenna in unique_antennas if user.antennas.get(antenna) is None]) | python | def antennas_missing_locations(user, Method=None):
"""
Return the number of antennas missing locations in the records of a given user.
"""
unique_antennas = set([record.position.antenna for record in user.records
if record.position.antenna is not None])
return sum([1 for antenna in unique_antennas if user.antennas.get(antenna) is None]) | [
"def",
"antennas_missing_locations",
"(",
"user",
",",
"Method",
"=",
"None",
")",
":",
"unique_antennas",
"=",
"set",
"(",
"[",
"record",
".",
"position",
".",
"antenna",
"for",
"record",
"in",
"user",
".",
"records",
"if",
"record",
".",
"position",
".",... | Return the number of antennas missing locations in the records of a given user. | [
"Return",
"the",
"number",
"of",
"antennas",
"missing",
"locations",
"in",
"the",
"records",
"of",
"a",
"given",
"user",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/tools.py#L237-L243 | train | 23,229 |
yvesalexandre/bandicoot | bandicoot/helper/tools.py | bandicoot_code_signature | def bandicoot_code_signature():
"""
Returns a unique hash of the Python source code in the current bandicoot
module, using the cryptographic hash function SHA-1.
"""
checksum = hashlib.sha1()
for root, dirs, files in os.walk(MAIN_DIRECTORY):
for filename in sorted(files):
if not filename.endswith('.py'):
continue
f_path = os.path.join(root, filename)
f_size = os.path.getsize(f_path)
with open(f_path, 'rb') as f:
while f.tell() != f_size:
checksum.update(f.read(0x40000))
return checksum.hexdigest() | python | def bandicoot_code_signature():
"""
Returns a unique hash of the Python source code in the current bandicoot
module, using the cryptographic hash function SHA-1.
"""
checksum = hashlib.sha1()
for root, dirs, files in os.walk(MAIN_DIRECTORY):
for filename in sorted(files):
if not filename.endswith('.py'):
continue
f_path = os.path.join(root, filename)
f_size = os.path.getsize(f_path)
with open(f_path, 'rb') as f:
while f.tell() != f_size:
checksum.update(f.read(0x40000))
return checksum.hexdigest() | [
"def",
"bandicoot_code_signature",
"(",
")",
":",
"checksum",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"MAIN_DIRECTORY",
")",
":",
"for",
"filename",
"in",
"sorted",
"(",
"files",
")... | Returns a unique hash of the Python source code in the current bandicoot
module, using the cryptographic hash function SHA-1. | [
"Returns",
"a",
"unique",
"hash",
"of",
"the",
"Python",
"source",
"code",
"in",
"the",
"current",
"bandicoot",
"module",
"using",
"the",
"cryptographic",
"hash",
"function",
"SHA",
"-",
"1",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/tools.py#L280-L298 | train | 23,230 |
yvesalexandre/bandicoot | bandicoot/helper/tools.py | _AnsiColorizer.supported | def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
raise
# guess false in case of error
return False | python | def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
raise
# guess false in case of error
return False | [
"def",
"supported",
"(",
"cls",
",",
"stream",
"=",
"sys",
".",
"stdout",
")",
":",
"if",
"not",
"stream",
".",
"isatty",
"(",
")",
":",
"return",
"False",
"# auto color only on TTYs",
"try",
":",
"import",
"curses",
"except",
"ImportError",
":",
"return",... | A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise. | [
"A",
"class",
"method",
"that",
"returns",
"True",
"if",
"the",
"current",
"platform",
"supports",
"coloring",
"terminal",
"output",
"using",
"this",
"method",
".",
"Returns",
"False",
"otherwise",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/tools.py#L147-L168 | train | 23,231 |
yvesalexandre/bandicoot | bandicoot/helper/tools.py | _AnsiColorizer.write | def write(self, text, color):
"""
Write the given text to the stream in the given color.
"""
color = self._colors[color]
self.stream.write('\x1b[{}m{}\x1b[0m'.format(color, text)) | python | def write(self, text, color):
"""
Write the given text to the stream in the given color.
"""
color = self._colors[color]
self.stream.write('\x1b[{}m{}\x1b[0m'.format(color, text)) | [
"def",
"write",
"(",
"self",
",",
"text",
",",
"color",
")",
":",
"color",
"=",
"self",
".",
"_colors",
"[",
"color",
"]",
"self",
".",
"stream",
".",
"write",
"(",
"'\\x1b[{}m{}\\x1b[0m'",
".",
"format",
"(",
"color",
",",
"text",
")",
")"
] | Write the given text to the stream in the given color. | [
"Write",
"the",
"given",
"text",
"to",
"the",
"stream",
"in",
"the",
"given",
"color",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/tools.py#L170-L175 | train | 23,232 |
yvesalexandre/bandicoot | bandicoot/spatial.py | percent_at_home | def percent_at_home(positions, user):
"""
The percentage of interactions the user had while he was at home.
.. note::
The position of the home is computed using
:meth:`User.recompute_home <bandicoot.core.User.recompute_home>`.
If no home can be found, the percentage of interactions at home
will be ``None``.
"""
if not user.has_home:
return None
total_home = sum(1 for p in positions if p == user.home)
return float(total_home) / len(positions) if len(positions) != 0 else 0 | python | def percent_at_home(positions, user):
"""
The percentage of interactions the user had while he was at home.
.. note::
The position of the home is computed using
:meth:`User.recompute_home <bandicoot.core.User.recompute_home>`.
If no home can be found, the percentage of interactions at home
will be ``None``.
"""
if not user.has_home:
return None
total_home = sum(1 for p in positions if p == user.home)
return float(total_home) / len(positions) if len(positions) != 0 else 0 | [
"def",
"percent_at_home",
"(",
"positions",
",",
"user",
")",
":",
"if",
"not",
"user",
".",
"has_home",
":",
"return",
"None",
"total_home",
"=",
"sum",
"(",
"1",
"for",
"p",
"in",
"positions",
"if",
"p",
"==",
"user",
".",
"home",
")",
"return",
"f... | The percentage of interactions the user had while he was at home.
.. note::
The position of the home is computed using
:meth:`User.recompute_home <bandicoot.core.User.recompute_home>`.
If no home can be found, the percentage of interactions at home
will be ``None``. | [
"The",
"percentage",
"of",
"interactions",
"the",
"user",
"had",
"while",
"he",
"was",
"at",
"home",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/spatial.py#L34-L49 | train | 23,233 |
yvesalexandre/bandicoot | bandicoot/spatial.py | entropy_of_antennas | def entropy_of_antennas(positions, normalize=False):
"""
The entropy of visited antennas.
Parameters
----------
normalize: boolean, default is False
Returns a normalized entropy between 0 and 1.
"""
counter = Counter(p for p in positions)
raw_entropy = entropy(list(counter.values()))
n = len(counter)
if normalize and n > 1:
return raw_entropy / math.log(n)
else:
return raw_entropy | python | def entropy_of_antennas(positions, normalize=False):
"""
The entropy of visited antennas.
Parameters
----------
normalize: boolean, default is False
Returns a normalized entropy between 0 and 1.
"""
counter = Counter(p for p in positions)
raw_entropy = entropy(list(counter.values()))
n = len(counter)
if normalize and n > 1:
return raw_entropy / math.log(n)
else:
return raw_entropy | [
"def",
"entropy_of_antennas",
"(",
"positions",
",",
"normalize",
"=",
"False",
")",
":",
"counter",
"=",
"Counter",
"(",
"p",
"for",
"p",
"in",
"positions",
")",
"raw_entropy",
"=",
"entropy",
"(",
"list",
"(",
"counter",
".",
"values",
"(",
")",
")",
... | The entropy of visited antennas.
Parameters
----------
normalize: boolean, default is False
Returns a normalized entropy between 0 and 1. | [
"The",
"entropy",
"of",
"visited",
"antennas",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/spatial.py#L88-L103 | train | 23,234 |
yvesalexandre/bandicoot | bandicoot/spatial.py | churn_rate | def churn_rate(user, summary='default', **kwargs):
"""
Computes the frequency spent at every towers each week, and returns the
distribution of the cosine similarity between two consecutives week.
.. note:: The churn rate is always computed between pairs of weeks.
"""
if len(user.records) == 0:
return statistics([], summary=summary)
query = {
'groupby': 'week',
'divide_by': OrderedDict([
('part_of_week', ['allweek']),
('part_of_day', ['allday'])
]),
'using': 'records',
'filter_empty': True,
'binning': True
}
rv = grouping_query(user, query)
weekly_positions = rv[0][1]
all_positions = list(set(p for l in weekly_positions for p in l))
frequencies = {}
cos_dist = []
for week, week_positions in enumerate(weekly_positions):
count = Counter(week_positions)
total = sum(count.values())
frequencies[week] = [count.get(p, 0) / total for p in all_positions]
all_indexes = range(len(all_positions))
for f_1, f_2 in pairwise(list(frequencies.values())):
num = sum(f_1[a] * f_2[a] for a in all_indexes)
denom_1 = sum(f ** 2 for f in f_1)
denom_2 = sum(f ** 2 for f in f_2)
cos_dist.append(1 - num / (denom_1 ** .5 * denom_2 ** .5))
return statistics(cos_dist, summary=summary) | python | def churn_rate(user, summary='default', **kwargs):
"""
Computes the frequency spent at every towers each week, and returns the
distribution of the cosine similarity between two consecutives week.
.. note:: The churn rate is always computed between pairs of weeks.
"""
if len(user.records) == 0:
return statistics([], summary=summary)
query = {
'groupby': 'week',
'divide_by': OrderedDict([
('part_of_week', ['allweek']),
('part_of_day', ['allday'])
]),
'using': 'records',
'filter_empty': True,
'binning': True
}
rv = grouping_query(user, query)
weekly_positions = rv[0][1]
all_positions = list(set(p for l in weekly_positions for p in l))
frequencies = {}
cos_dist = []
for week, week_positions in enumerate(weekly_positions):
count = Counter(week_positions)
total = sum(count.values())
frequencies[week] = [count.get(p, 0) / total for p in all_positions]
all_indexes = range(len(all_positions))
for f_1, f_2 in pairwise(list(frequencies.values())):
num = sum(f_1[a] * f_2[a] for a in all_indexes)
denom_1 = sum(f ** 2 for f in f_1)
denom_2 = sum(f ** 2 for f in f_2)
cos_dist.append(1 - num / (denom_1 ** .5 * denom_2 ** .5))
return statistics(cos_dist, summary=summary) | [
"def",
"churn_rate",
"(",
"user",
",",
"summary",
"=",
"'default'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"user",
".",
"records",
")",
"==",
"0",
":",
"return",
"statistics",
"(",
"[",
"]",
",",
"summary",
"=",
"summary",
")",
"query... | Computes the frequency spent at every towers each week, and returns the
distribution of the cosine similarity between two consecutives week.
.. note:: The churn rate is always computed between pairs of weeks. | [
"Computes",
"the",
"frequency",
"spent",
"at",
"every",
"towers",
"each",
"week",
"and",
"returns",
"the",
"distribution",
"of",
"the",
"cosine",
"similarity",
"between",
"two",
"consecutives",
"week",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/spatial.py#L133-L173 | train | 23,235 |
yvesalexandre/bandicoot | bandicoot/core.py | User.describe | def describe(self):
"""
Generates a short description of the object, and writes it to the
standard output.
Examples
--------
>>> import bandicoot as bc
>>> user = bc.User()
>>> user.records = bc.tests.generate_user.random_burst(5)
>>> user.describe()
[x] 5 records from 2014-01-01 10:41:00 to 2014-01-01 11:21:00
5 contacts
[x] 1 attribute
"""
def format_int(name, n):
if n == 0 or n == 1:
return "%i %s" % (n, name[:-1])
else:
return "%i %s" % (n, name)
empty_box = Colors.OKGREEN + '[ ]' + Colors.ENDC + ' '
filled_box = Colors.OKGREEN + '[x]' + Colors.ENDC + ' '
if self.start_time is None:
print(empty_box + "No records stored")
else:
print((filled_box + format_int("records", len(self.records)) +
" from %s to %s" % (self.start_time, self.end_time)))
nb_contacts = bc.individual.number_of_contacts(
self, interaction='callandtext', groupby=None)
nb_contacts = nb_contacts['allweek']['allday']['callandtext']
if nb_contacts:
print(filled_box + format_int("contacts", nb_contacts))
else:
print(empty_box + "No contacts")
if self.has_attributes:
print(filled_box + format_int("attributes", len(self.attributes)))
else:
print(empty_box + "No attribute stored")
if len(self.antennas) == 0:
print(empty_box + "No antenna stored")
else:
print(filled_box + format_int("antennas", len(self.antennas)))
if self.has_recharges:
print(filled_box + format_int("recharges", len(self.recharges)))
else:
print(empty_box + "No recharges")
if self.has_home:
print(filled_box + "Has home")
else:
print(empty_box + "No home")
if self.has_text:
print(filled_box + "Has texts")
else:
print(empty_box + "No texts")
if self.has_call:
print(filled_box + "Has calls")
else:
print(empty_box + "No calls")
if self.has_network:
print(filled_box + "Has network")
else:
def describe(self):
    """
    Generates a short description of the object, and writes it to the
    standard output.

    Examples
    --------
    >>> import bandicoot as bc
    >>> user = bc.User()
    >>> user.records = bc.tests.generate_user.random_burst(5)
    >>> user.describe()
    [x] 5 records from 2014-01-01 10:41:00 to 2014-01-01 11:21:00
        5 contacts
    [x] 1 attribute
    """
    def fmt_count(label, count):
        # Drop the plural 's' when the count is zero or one.
        suffix = label[:-1] if count in (0, 1) else label
        return "%i %s" % (count, suffix)

    empty_box = Colors.OKGREEN + '[ ]' + Colors.ENDC + ' '
    filled_box = Colors.OKGREEN + '[x]' + Colors.ENDC + ' '

    if self.start_time is None:
        print(empty_box + "No records stored")
    else:
        print((filled_box + fmt_count("records", len(self.records)) +
               " from %s to %s" % (self.start_time, self.end_time)))

    nb_contacts = bc.individual.number_of_contacts(
        self, interaction='callandtext', groupby=None)
    nb_contacts = nb_contacts['allweek']['allday']['callandtext']
    if nb_contacts:
        print(filled_box + fmt_count("contacts", nb_contacts))
    else:
        print(empty_box + "No contacts")

    if self.has_attributes:
        print(filled_box + fmt_count("attributes", len(self.attributes)))
    else:
        print(empty_box + "No attribute stored")

    if len(self.antennas) == 0:
        print(empty_box + "No antenna stored")
    else:
        print(filled_box + fmt_count("antennas", len(self.antennas)))

    if self.has_recharges:
        print(filled_box + fmt_count("recharges", len(self.recharges)))
    else:
        print(empty_box + "No recharges")

    # Boolean capability flags share the same "Has X" / "No X" layout.
    for present, label in ((self.has_home, "home"),
                           (self.has_text, "texts"),
                           (self.has_call, "calls"),
                           (self.has_network, "network")):
        if present:
            print(filled_box + "Has " + label)
        else:
            print(empty_box + "No " + label)
def recompute_home(self):
    """
    Return the antenna where the user spends most of his time at night.

    Positions of nighttime records are binned in 30-minute chunks and
    the most frequent bin becomes the home. ``self.home`` is set to
    None if there are no candidates for a home antenna; cached
    indicators are reset either way, and the new home is returned.
    """
    # PEP 8 (E731): use named inner functions instead of assigned
    # lambdas for the night predicate.
    if self.night_start < self.night_end:
        # Night window does not wrap around midnight.
        def is_night(r):
            return self.night_start < r.datetime.time() < self.night_end
    else:
        # Window wraps midnight: keep records outside (end, start).
        def is_night(r):
            return not (self.night_end < r.datetime.time() < self.night_start)

    # Bin positions by chunks of 30 minutes
    candidates = list(positions_binning(filter(is_night, self._records)))

    if len(candidates) == 0:
        self.home = None
    else:
        self.home = Counter(candidates).most_common()[0][0]

    self.reset_cache()
    return self.home
def set_home(self, new_home):
    """
    Sets the user's home. The argument can be a Position object, a
    tuple containing location data, or an antenna identifier.

    Cached indicators are invalidated afterwards via ``reset_cache``.
    """
    # Use isinstance instead of `type(x) is T` so subclasses of
    # Position/tuple are handled correctly (backward compatible for
    # all exact-type callers).
    if isinstance(new_home, Position):
        self.home = new_home
    elif isinstance(new_home, tuple):
        self.home = Position(location=new_home)
    else:
        # Anything else is treated as an antenna identifier.
        self.home = Position(antenna=new_home)

    self.reset_cache()
def interevent_time_recharges(recharges):
    """
    Return the distribution of time (in seconds) between consecutive
    recharges of the user, as summary statistics.
    """
    gaps = [(later - earlier).total_seconds()
            for earlier, later in pairwise(r.datetime for r in recharges)]
    return summary_stats(gaps)
def percent_pareto_recharges(recharges, percentage=0.8):
    """
    Percentage of recharges that account for 80% of total recharged amount.

    Parameters
    ----------
    recharges : list
        Recharge objects exposing an ``amount`` attribute.
    percentage : float, optional
        Fraction of the total amount to cover (default 0.8).

    Returns
    -------
    float or None
        Fraction of recharges (largest amounts first) needed to reach
        ``percentage`` of the total amount, or None for an empty list.
    """
    if not recharges:
        # Guard: the original loop left `count` unbound and divided by
        # len(recharges) == 0 on empty input.
        return None

    amounts = sorted((r.amount for r in recharges), reverse=True)
    threshold = percentage * sum(amounts)

    partial_sum = 0
    for count, amount in enumerate(amounts, start=1):
        partial_sum += amount
        if partial_sum >= threshold:
            break

    return count / len(recharges)
yvesalexandre/bandicoot | bandicoot/recharge.py | average_balance_recharges | def average_balance_recharges(user, **kwargs):
"""
Return the average daily balance estimated from all recharges. We assume a
linear usage between two recharges, and an empty balance before a recharge.
The average balance can be seen as the area under the curve delimited by
all recharges.
"""
balance = 0
for r1, r2 in pairwise(user.recharges):
# If the range is less than 1 day, cap at 1
balance += r1.amount * min(1, (r2.datetime - r1.datetime).days) / 2
first_recharge = user.recharges[0]
last_recharge = user.recharges[-1]
duration = (last_recharge.datetime - first_recharge.datetime).days
return balance / min(1, duration) | python | def average_balance_recharges(user, **kwargs):
"""
Return the average daily balance estimated from all recharges. We assume a
linear usage between two recharges, and an empty balance before a recharge.
The average balance can be seen as the area under the curve delimited by
all recharges.
"""
balance = 0
for r1, r2 in pairwise(user.recharges):
# If the range is less than 1 day, cap at 1
balance += r1.amount * min(1, (r2.datetime - r1.datetime).days) / 2
first_recharge = user.recharges[0]
last_recharge = user.recharges[-1]
duration = (last_recharge.datetime - first_recharge.datetime).days
return balance / min(1, duration) | [
"def",
"average_balance_recharges",
"(",
"user",
",",
"*",
"*",
"kwargs",
")",
":",
"balance",
"=",
"0",
"for",
"r1",
",",
"r2",
"in",
"pairwise",
"(",
"user",
".",
"recharges",
")",
":",
"# If the range is less than 1 day, cap at 1",
"balance",
"+=",
"r1",
... | Return the average daily balance estimated from all recharges. We assume a
linear usage between two recharges, and an empty balance before a recharge.
The average balance can be seen as the area under the curve delimited by
all recharges. | [
"Return",
"the",
"average",
"daily",
"balance",
"estimated",
"from",
"all",
"recharges",
".",
"We",
"assume",
"a",
"linear",
"usage",
"between",
"two",
"recharges",
"and",
"an",
"empty",
"balance",
"before",
"a",
"recharge",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/recharge.py#L74-L91 | train | 23,241 |
yvesalexandre/bandicoot | bandicoot/network.py | _round_half_hour | def _round_half_hour(record):
"""
Round a time DOWN to half nearest half-hour.
"""
k = record.datetime + timedelta(minutes=-(record.datetime.minute % 30))
return datetime(k.year, k.month, k.day, k.hour, k.minute, 0) | python | def _round_half_hour(record):
"""
Round a time DOWN to half nearest half-hour.
"""
k = record.datetime + timedelta(minutes=-(record.datetime.minute % 30))
return datetime(k.year, k.month, k.day, k.hour, k.minute, 0) | [
"def",
"_round_half_hour",
"(",
"record",
")",
":",
"k",
"=",
"record",
".",
"datetime",
"+",
"timedelta",
"(",
"minutes",
"=",
"-",
"(",
"record",
".",
"datetime",
".",
"minute",
"%",
"30",
")",
")",
"return",
"datetime",
"(",
"k",
".",
"year",
",",... | Round a time DOWN to half nearest half-hour. | [
"Round",
"a",
"time",
"DOWN",
"to",
"half",
"nearest",
"half",
"-",
"hour",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/network.py#L36-L41 | train | 23,242 |
def matrix_index(user):
    """
    Returns the keys associated with each axis of the matrices.

    The first key is always the name of the current user, followed by
    the sorted names of all the correspondants.
    """
    correspondents = (name for name in user.network if name != user.name)
    return [user.name] + sorted(correspondents)
def matrix_directed_unweighted(user):
    """
    Returns a directed, unweighted matrix where an edge exists if there
    is at least one call or text. None cells (unknown correspondents)
    are left untouched.
    """
    matrix = _interaction_matrix(user, interaction=None)
    size = len(matrix)
    for row in range(size):
        for col in range(size):
            cell = matrix[row][col]
            if cell is not None and cell > 0:
                matrix[row][col] = 1
    return matrix
def matrix_undirected_weighted(user, interaction=None):
    """
    Returns an undirected, weighted matrix for call, text and call
    duration where an edge exists if the relationship is reciprocated.

    The weight of a reciprocated edge is the sum of both directed
    weights; None propagates when either direction is unknown.
    """
    directed = _interaction_matrix(user, interaction=interaction)
    size = len(directed)
    undirected = [[0] * size for _ in range(size)]

    for row in range(size):
        for col in range(size):
            forward = directed[row][col]
            backward = directed[col][row]
            if row != col and forward and backward:
                undirected[row][col] = forward + backward
            elif forward is None or backward is None:
                undirected[row][col] = None
            # otherwise keep the preinitialized 0

    return undirected
def matrix_undirected_unweighted(user):
    """
    Returns an undirected, unweighted matrix where an edge exists if
    the relationship is reciprocated: both symmetric cells are set to 1,
    None cells are skipped.
    """
    matrix = matrix_undirected_weighted(user, interaction=None)
    for i, j in combinations(range(len(matrix)), 2):
        if matrix[i][j] is None or matrix[j][i] is None:
            continue
        if matrix[i][j] > 0 and matrix[j][i] > 0:
            matrix[i][j] = matrix[j][i] = 1
    return matrix
def clustering_coefficient_unweighted(user):
    """
    The clustering coefficient of the user in the unweighted, undirected
    ego network.

    It is defined by counting the number of closed triplets including
    the current user:

    .. math::
        C = \\frac{2 * \\text{closed triplets}}{ \\text{degree} \, (\\text{degree - 1})}

    where ``degree`` is the degree of the current user in the network.
    """
    matrix = matrix_undirected_unweighted(user)

    closed_triplets = 0.
    for i, j in combinations(range(len(matrix)), 2):
        edge_ij = matrix[i][j]
        edge_i_ego = matrix[i][0]
        edge_j_ego = matrix[j][0]
        # Skip triplets involving an unknown (None) edge.
        if edge_ij is None or edge_i_ego is None or edge_j_ego is None:
            continue
        if edge_ij > 0 and edge_i_ego > 0 and edge_j_ego > 0:
            closed_triplets += 1

    degree = sum(matrix[0])  # ego degree: row 0 holds 0/1 edge flags
    if degree > 1:
        return 2 * closed_triplets / (degree * (degree - 1))
    return 0
def clustering_coefficient_weighted(user, interaction=None):
    """
    The clustering coefficient of the user's weighted, undirected network.

    Closed triplets including the ego are weighted by the geometric mean
    of the three (undirected) edge weights of the triplet:

    .. math::
        weight_{abc} = (m_{ab} \; m_{bc} \; m_{ac})^{1/3}

    The weight is normalized, between 0 and 1, by the maximum value in
    the matrix. Returns None when the matrix holds no known weights.
    """
    matrix = matrix_undirected_weighted(user, interaction=interaction)
    known_weights = [w for row in matrix for w in row if w is not None]
    if not known_weights:
        return None
    max_weight = max(known_weights)

    triplet_weight = 0
    for i, j in combinations(range(len(matrix)), 2):
        edge_ij = matrix[i][j]
        edge_i_ego = matrix[i][0]
        edge_j_ego = matrix[j][0]
        if edge_ij is None or edge_i_ego is None or edge_j_ego is None:
            continue
        if edge_ij and edge_i_ego and edge_j_ego:
            # Geometric mean of the three weights, scaled by max_weight.
            triplet_weight += (edge_ij * edge_i_ego * edge_j_ego) ** (1 / 3) / max_weight

    degree = sum(1 for w in matrix[0] if w > 0)
    if degree > 1:
        return 2 * triplet_weight / (degree * (degree - 1))
    return 0
def assortativity_indicators(user):
    """
    Computes the assortativity of indicators.

    This indicator measures the similarity of the current user with his
    correspondants, for all bandicoot indicators. For each one, it
    averages the squared difference between the user's value and each
    reciprocated correspondent's value:

    .. math::
        \\text{assortativity}(J) = \\frac{1}{n} \\sum_i^n (J_{\\text{user}} - J_{\\text{i}})^2

    for the indicator :math:`J`, and all the :math:`n` correspondents.
    """
    matrix = matrix_undirected_unweighted(user)

    # `sq_diff_sum[k]` accumulates squared differences for indicator k;
    # `pair_count[k]` counts how many correspondents contributed.
    sq_diff_sum = defaultdict(int)
    pair_count = defaultdict(int)

    # Use all indicators except reporting variables and attributes.
    ego_indics = {key: value
                  for key, value in all(user, flatten=True).items()
                  if key != "name" and not key.startswith("reporting__") and
                  not key.startswith("attributes")}

    for i, name in enumerate(matrix_index(user)):
        correspondent = user.network.get(name, None)
        # Skip the ego itself and non-reciprocated edges.
        if correspondent is None or name == user.name or matrix[0][i] == 0:
            continue

        neighbor_indics = all(correspondent, flatten=True)
        for key in ego_indics:
            if ego_indics[key] is not None and neighbor_indics[key] is not None:
                pair_count[key] += 1
                sq_diff_sum[key] += (ego_indics[key] - neighbor_indics[key]) ** 2

    return {key: sq_diff_sum[key] / pair_count[key] for key in sq_diff_sum}
def assortativity_attributes(user):
    """
    Computes the assortativity of the nominal attributes.

    This indicator measures the homophily of the current user with his
    correspondants, for each attribute. It returns a value between 0
    (no assortativity) and 1 (all the contacts share the same value):
    the percentage of contacts sharing the same value.
    """
    matrix = matrix_undirected_unweighted(user)
    neighbors = [name for name in user.network.keys() if name != user.name]

    # Attributes of reciprocated correspondents that define attributes.
    neighbors_attrbs = {}
    for i, name in enumerate(matrix_index(user)):
        correspondent = user.network.get(name, None)
        if correspondent is None or name == user.name or matrix[0][i] == 0:
            continue
        if correspondent.has_attributes:
            neighbors_attrbs[correspondent.name] = correspondent.attributes

    assortativity = {}
    for attribute in user.attributes:
        matching = sum(1 for n in neighbors
                       if n in neighbors_attrbs and
                       user.attributes[attribute] == neighbors_attrbs[n][attribute])
        known = sum(1 for n in neighbors if n in neighbors_attrbs)
        assortativity[attribute] = matching / known if known != 0 else None

    return assortativity
def network_sampling(n, filename, directory=None, snowball=False, user=None):
    """
    Selects a few users and exports a CSV of indicators for them.

    TODO: Returns the network/graph between the selected users.

    Parameters
    ----------
    n : int
        Number of users to select.
    filename : string
        File to export to.
    directory: string
        Directory to select users from if using the default random selection.
    snowball: starts from a specified user, iterates over neighbors, and does a
        BFS until n neighbors are reached
    user : User
        Seed user required when ``snowball`` is True.
    """
    if snowball:
        if user is None:
            raise ValueError("Must specify a starting user from whom to initiate the snowball")
        users, agenda = [user], [user]
        while len(agenda) > 0:
            # NOTE(review): agenda.pop() pops the last element (LIFO),
            # which is a DFS despite the docstring saying BFS — confirm
            # the intended traversal before changing it.
            parent = agenda.pop()
            # Shuffle neighbors so the sample is not biased by name order.
            dealphebetized_network = sorted(parent.network.items(),
                                            key=lambda k: random.random())
            for _, neighbor in dealphebetized_network:
                if neighbor not in users and neighbor is not None and len(users) < n:
                    users.append(neighbor)
                    if neighbor.network:
                        # Bug fix: Python lists have no push(); the
                        # original raised AttributeError here and never
                        # explored beyond the seed's direct neighbors.
                        agenda.append(neighbor)
    else:
        files = [x for x in os.listdir(directory)
                 if os.path.isfile(os.path.join(directory, x))]
        shuffled_files = sorted(files, key=lambda k: random.random())
        user_names = shuffled_files[:n]
        users = [bc.read_csv(u[:-4], directory) for u in user_names]

    if len(users) < n:
        raise ValueError("Specified more users than records that exist, only {} records available".format(len(users)))

    bc.to_csv([bc.utils.all(u) for u in users], filename)
"def",
"network_sampling",
"(",
"n",
",",
"filename",
",",
"directory",
"=",
"None",
",",
"snowball",
"=",
"False",
",",
"user",
"=",
"None",
")",
":",
"if",
"snowball",
":",
"if",
"user",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must specify a ... | Selects a few users and exports a CSV of indicators for them.
TODO: Returns the network/graph between the selected users.
Parameters
----------
n : int
Number of users to select.
filename : string
File to export to.
directory: string
Directory to select users from if using the default random selection.
snowball: starts from a specified user, iterates over neighbors, and does a
BFS until n neighbors are reached | [
"Selects",
"a",
"few",
"users",
"and",
"exports",
"a",
"CSV",
"of",
"indicators",
"for",
"them",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/network.py#L317-L355 | train | 23,251 |
yvesalexandre/bandicoot | bandicoot/visualization.py | export | def export(user, directory=None, warnings=True):
"""
Build a temporary directory with the visualization.
Returns the local path where files have been written.
Examples
--------
>>> bandicoot.visualization.export(U)
Successfully exported the visualization to /tmp/tmpsIyncS
"""
# Get dashboard directory
current_file = os.path.realpath(__file__)
current_path = os.path.dirname(current_file)
dashboard_path = os.path.join(current_path, 'dashboard_src')
# Create a temporary directory if needed and copy all files
if directory:
dirpath = directory
else:
dirpath = tempfile.mkdtemp()
# Copy all files except source code
copy_tree(dashboard_path + '/public', dirpath, update=1)
# Export indicators
data = user_data(user)
bc.io.to_json(data, dirpath + '/data/bc_export.json', warnings=False)
if warnings:
print("Successfully exported the visualization to %s" % dirpath)
return dirpath | python | def export(user, directory=None, warnings=True):
"""
Build a temporary directory with the visualization.
Returns the local path where files have been written.
Examples
--------
>>> bandicoot.visualization.export(U)
Successfully exported the visualization to /tmp/tmpsIyncS
"""
# Get dashboard directory
current_file = os.path.realpath(__file__)
current_path = os.path.dirname(current_file)
dashboard_path = os.path.join(current_path, 'dashboard_src')
# Create a temporary directory if needed and copy all files
if directory:
dirpath = directory
else:
dirpath = tempfile.mkdtemp()
# Copy all files except source code
copy_tree(dashboard_path + '/public', dirpath, update=1)
# Export indicators
data = user_data(user)
bc.io.to_json(data, dirpath + '/data/bc_export.json', warnings=False)
if warnings:
print("Successfully exported the visualization to %s" % dirpath)
return dirpath | [
"def",
"export",
"(",
"user",
",",
"directory",
"=",
"None",
",",
"warnings",
"=",
"True",
")",
":",
"# Get dashboard directory",
"current_file",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
"current_path",
"=",
"os",
".",
"path",
".",
... | Build a temporary directory with the visualization.
Returns the local path where files have been written.
Examples
--------
>>> bandicoot.visualization.export(U)
Successfully exported the visualization to /tmp/tmpsIyncS | [
"Build",
"a",
"temporary",
"directory",
"with",
"the",
"visualization",
".",
"Returns",
"the",
"local",
"path",
"where",
"files",
"have",
"been",
"written",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/visualization.py#L118-L151 | train | 23,252 |
yvesalexandre/bandicoot | bandicoot/visualization.py | run | def run(user, port=4242):
"""
Build a temporary directory with a visualization and serve it over HTTP.
Examples
--------
>>> bandicoot.visualization.run(U)
Successfully exported the visualization to /tmp/tmpsIyncS
Serving bandicoot visualization at http://0.0.0.0:4242
"""
owd = os.getcwd()
dir = export(user)
os.chdir(dir)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
try:
httpd = SocketServer.TCPServer(("", port), Handler)
print("Serving bandicoot visualization at http://0.0.0.0:%i" % port)
httpd.serve_forever()
except KeyboardInterrupt:
print("^C received, shutting down the web server")
httpd.server_close()
finally:
os.chdir(owd) | python | def run(user, port=4242):
"""
Build a temporary directory with a visualization and serve it over HTTP.
Examples
--------
>>> bandicoot.visualization.run(U)
Successfully exported the visualization to /tmp/tmpsIyncS
Serving bandicoot visualization at http://0.0.0.0:4242
"""
owd = os.getcwd()
dir = export(user)
os.chdir(dir)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
try:
httpd = SocketServer.TCPServer(("", port), Handler)
print("Serving bandicoot visualization at http://0.0.0.0:%i" % port)
httpd.serve_forever()
except KeyboardInterrupt:
print("^C received, shutting down the web server")
httpd.server_close()
finally:
os.chdir(owd) | [
"def",
"run",
"(",
"user",
",",
"port",
"=",
"4242",
")",
":",
"owd",
"=",
"os",
".",
"getcwd",
"(",
")",
"dir",
"=",
"export",
"(",
"user",
")",
"os",
".",
"chdir",
"(",
"dir",
")",
"Handler",
"=",
"SimpleHTTPServer",
".",
"SimpleHTTPRequestHandler"... | Build a temporary directory with a visualization and serve it over HTTP.
Examples
--------
>>> bandicoot.visualization.run(U)
Successfully exported the visualization to /tmp/tmpsIyncS
Serving bandicoot visualization at http://0.0.0.0:4242 | [
"Build",
"a",
"temporary",
"directory",
"with",
"a",
"visualization",
"and",
"serve",
"it",
"over",
"HTTP",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/visualization.py#L154-L178 | train | 23,253 |
yvesalexandre/bandicoot | bandicoot/io.py | to_csv | def to_csv(objects, filename, digits=5, warnings=True):
"""
Export the flatten indicators of one or several users to CSV.
Parameters
----------
objects : list
List of objects to be exported.
filename : string
File to export to.
digits : int
Precision of floats.
Examples
--------
This function can be used to export the results of
:meth`bandicoot.utils.all`.
>>> U_1 = bc.User()
>>> U_2 = bc.User()
>>> bc.to_csv([bc.utils.all(U_1), bc.utils.all(U_2)], 'results_1_2.csv')
If you only have one object, you can simply pass it as argument:
>>> bc.to_csv(bc.utils.all(U_1), 'results_1.csv')
"""
if not isinstance(objects, list):
objects = [objects]
data = [flatten(obj) for obj in objects]
all_keys = [d for datum in data for d in datum.keys()]
field_names = sorted(set(all_keys), key=lambda x: all_keys.index(x))
with open(filename, 'w') as f:
w = csv.writer(f)
w.writerow(field_names)
def make_repr(item):
if item is None:
return None
elif isinstance(item, float):
return repr(round(item, digits))
else:
return str(item)
for row in data:
row = dict((k, make_repr(v)) for k, v in row.items())
w.writerow([make_repr(row.get(k, None)) for k in field_names])
if warnings:
print("Successfully exported {} object(s) to {}".format(len(objects),
filename)) | python | def to_csv(objects, filename, digits=5, warnings=True):
"""
Export the flatten indicators of one or several users to CSV.
Parameters
----------
objects : list
List of objects to be exported.
filename : string
File to export to.
digits : int
Precision of floats.
Examples
--------
This function can be used to export the results of
:meth`bandicoot.utils.all`.
>>> U_1 = bc.User()
>>> U_2 = bc.User()
>>> bc.to_csv([bc.utils.all(U_1), bc.utils.all(U_2)], 'results_1_2.csv')
If you only have one object, you can simply pass it as argument:
>>> bc.to_csv(bc.utils.all(U_1), 'results_1.csv')
"""
if not isinstance(objects, list):
objects = [objects]
data = [flatten(obj) for obj in objects]
all_keys = [d for datum in data for d in datum.keys()]
field_names = sorted(set(all_keys), key=lambda x: all_keys.index(x))
with open(filename, 'w') as f:
w = csv.writer(f)
w.writerow(field_names)
def make_repr(item):
if item is None:
return None
elif isinstance(item, float):
return repr(round(item, digits))
else:
return str(item)
for row in data:
row = dict((k, make_repr(v)) for k, v in row.items())
w.writerow([make_repr(row.get(k, None)) for k in field_names])
if warnings:
print("Successfully exported {} object(s) to {}".format(len(objects),
filename)) | [
"def",
"to_csv",
"(",
"objects",
",",
"filename",
",",
"digits",
"=",
"5",
",",
"warnings",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"objects",
",",
"list",
")",
":",
"objects",
"=",
"[",
"objects",
"]",
"data",
"=",
"[",
"flatten",
"... | Export the flatten indicators of one or several users to CSV.
Parameters
----------
objects : list
List of objects to be exported.
filename : string
File to export to.
digits : int
Precision of floats.
Examples
--------
This function can be used to export the results of
:meth`bandicoot.utils.all`.
>>> U_1 = bc.User()
>>> U_2 = bc.User()
>>> bc.to_csv([bc.utils.all(U_1), bc.utils.all(U_2)], 'results_1_2.csv')
If you only have one object, you can simply pass it as argument:
>>> bc.to_csv(bc.utils.all(U_1), 'results_1.csv') | [
"Export",
"the",
"flatten",
"indicators",
"of",
"one",
"or",
"several",
"users",
"to",
"CSV",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L46-L96 | train | 23,254 |
yvesalexandre/bandicoot | bandicoot/io.py | to_json | def to_json(objects, filename, warnings=True):
"""
Export the indicators of one or several users to JSON.
Parameters
----------
objects : list
List of objects to be exported.
filename : string
File to export to.
Examples
--------
This function can be use to export the results of
:meth`bandicoot.utils.all`.
>>> U_1 = bc.User()
>>> U_2 = bc.User()
>>> bc.to_json([bc.utils.all(U_1), bc.utils.all(U_2)], 'results_1_2.json')
If you only have one object, you can simply pass it as argument:
>>> bc.to_json(bc.utils.all(U_1), 'results_1.json')
"""
if not isinstance(objects, list):
objects = [objects]
obj_dict = OrderedDict([(obj['name'], obj) for obj in objects])
with open(filename, 'w') as f:
f.write(dumps(obj_dict, indent=4, separators=(',', ': ')))
if warnings:
print("Successfully exported {} object(s) to {}".format(len(objects),
filename)) | python | def to_json(objects, filename, warnings=True):
"""
Export the indicators of one or several users to JSON.
Parameters
----------
objects : list
List of objects to be exported.
filename : string
File to export to.
Examples
--------
This function can be use to export the results of
:meth`bandicoot.utils.all`.
>>> U_1 = bc.User()
>>> U_2 = bc.User()
>>> bc.to_json([bc.utils.all(U_1), bc.utils.all(U_2)], 'results_1_2.json')
If you only have one object, you can simply pass it as argument:
>>> bc.to_json(bc.utils.all(U_1), 'results_1.json')
"""
if not isinstance(objects, list):
objects = [objects]
obj_dict = OrderedDict([(obj['name'], obj) for obj in objects])
with open(filename, 'w') as f:
f.write(dumps(obj_dict, indent=4, separators=(',', ': ')))
if warnings:
print("Successfully exported {} object(s) to {}".format(len(objects),
filename)) | [
"def",
"to_json",
"(",
"objects",
",",
"filename",
",",
"warnings",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"objects",
",",
"list",
")",
":",
"objects",
"=",
"[",
"objects",
"]",
"obj_dict",
"=",
"OrderedDict",
"(",
"[",
"(",
"obj",
"[... | Export the indicators of one or several users to JSON.
Parameters
----------
objects : list
List of objects to be exported.
filename : string
File to export to.
Examples
--------
This function can be use to export the results of
:meth`bandicoot.utils.all`.
>>> U_1 = bc.User()
>>> U_2 = bc.User()
>>> bc.to_json([bc.utils.all(U_1), bc.utils.all(U_2)], 'results_1_2.json')
If you only have one object, you can simply pass it as argument:
>>> bc.to_json(bc.utils.all(U_1), 'results_1.json') | [
"Export",
"the",
"indicators",
"of",
"one",
"or",
"several",
"users",
"to",
"JSON",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L99-L132 | train | 23,255 |
yvesalexandre/bandicoot | bandicoot/io.py | _parse_record | def _parse_record(data, duration_format='seconds'):
"""
Parse a raw data dictionary and return a Record object.
"""
def _map_duration(s):
if s == '':
return None
elif duration_format.lower() == 'seconds':
return int(s)
else:
t = time.strptime(s, duration_format)
return 3600 * t.tm_hour + 60 * t.tm_min + t.tm_sec
def _map_position(data):
antenna = Position()
if 'antenna_id' in data and data['antenna_id']:
antenna.antenna = data['antenna_id']
if 'place_id' in data:
raise NameError("Use field name 'antenna_id' in input files. "
"'place_id' is deprecated.")
if 'latitude' in data and 'longitude' in data:
latitude = data['latitude']
longitude = data['longitude']
# latitude and longitude should not be empty strings.
if latitude and longitude:
antenna.location = float(latitude), float(longitude)
return antenna
return Record(interaction=data['interaction'] if data['interaction'] else None,
direction=data['direction'],
correspondent_id=data['correspondent_id'],
datetime=_tryto(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"),
data['datetime']),
call_duration=_tryto(_map_duration, data['call_duration']),
position=_tryto(_map_position, data)) | python | def _parse_record(data, duration_format='seconds'):
"""
Parse a raw data dictionary and return a Record object.
"""
def _map_duration(s):
if s == '':
return None
elif duration_format.lower() == 'seconds':
return int(s)
else:
t = time.strptime(s, duration_format)
return 3600 * t.tm_hour + 60 * t.tm_min + t.tm_sec
def _map_position(data):
antenna = Position()
if 'antenna_id' in data and data['antenna_id']:
antenna.antenna = data['antenna_id']
if 'place_id' in data:
raise NameError("Use field name 'antenna_id' in input files. "
"'place_id' is deprecated.")
if 'latitude' in data and 'longitude' in data:
latitude = data['latitude']
longitude = data['longitude']
# latitude and longitude should not be empty strings.
if latitude and longitude:
antenna.location = float(latitude), float(longitude)
return antenna
return Record(interaction=data['interaction'] if data['interaction'] else None,
direction=data['direction'],
correspondent_id=data['correspondent_id'],
datetime=_tryto(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"),
data['datetime']),
call_duration=_tryto(_map_duration, data['call_duration']),
position=_tryto(_map_position, data)) | [
"def",
"_parse_record",
"(",
"data",
",",
"duration_format",
"=",
"'seconds'",
")",
":",
"def",
"_map_duration",
"(",
"s",
")",
":",
"if",
"s",
"==",
"''",
":",
"return",
"None",
"elif",
"duration_format",
".",
"lower",
"(",
")",
"==",
"'seconds'",
":",
... | Parse a raw data dictionary and return a Record object. | [
"Parse",
"a",
"raw",
"data",
"dictionary",
"and",
"return",
"a",
"Record",
"object",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L147-L187 | train | 23,256 |
yvesalexandre/bandicoot | bandicoot/io.py | filter_record | def filter_record(records):
"""
Filter records and remove items with missing or inconsistent fields
Parameters
----------
records : list
A list of Record objects
Returns
-------
records, ignored : (Record list, dict)
A tuple of filtered records, and a dictionary counting the
missings fields
"""
def scheme(r):
if r.interaction is None:
call_duration_ok = True
elif r.interaction == 'call':
call_duration_ok = isinstance(r.call_duration, (int, float))
else:
call_duration_ok = True
callandtext = r.interaction in ['call', 'text']
not_callandtext = not callandtext
return {
'interaction': r.interaction in ['call', 'text', 'gps', None],
'direction': (not_callandtext and r.direction is None) or r.direction in ['in', 'out'],
'correspondent_id': not_callandtext or (r.correspondent_id not in [None, '']),
'datetime': isinstance(r.datetime, datetime),
'call_duration': call_duration_ok,
'location': callandtext or r.position.type() is not None
}
ignored = OrderedDict([
('all', 0),
('interaction', 0),
('direction', 0),
('correspondent_id', 0),
('datetime', 0),
('call_duration', 0),
('location', 0),
])
bad_records = []
def _filter(records):
for r in records:
valid = True
for key, valid_key in scheme(r).items():
if not valid_key:
ignored[key] += 1
bad_records.append(r)
# Not breaking, to count all fields with errors
valid = False
if valid:
yield r
else:
ignored['all'] += 1
return list(_filter(records)), ignored, bad_records | python | def filter_record(records):
"""
Filter records and remove items with missing or inconsistent fields
Parameters
----------
records : list
A list of Record objects
Returns
-------
records, ignored : (Record list, dict)
A tuple of filtered records, and a dictionary counting the
missings fields
"""
def scheme(r):
if r.interaction is None:
call_duration_ok = True
elif r.interaction == 'call':
call_duration_ok = isinstance(r.call_duration, (int, float))
else:
call_duration_ok = True
callandtext = r.interaction in ['call', 'text']
not_callandtext = not callandtext
return {
'interaction': r.interaction in ['call', 'text', 'gps', None],
'direction': (not_callandtext and r.direction is None) or r.direction in ['in', 'out'],
'correspondent_id': not_callandtext or (r.correspondent_id not in [None, '']),
'datetime': isinstance(r.datetime, datetime),
'call_duration': call_duration_ok,
'location': callandtext or r.position.type() is not None
}
ignored = OrderedDict([
('all', 0),
('interaction', 0),
('direction', 0),
('correspondent_id', 0),
('datetime', 0),
('call_duration', 0),
('location', 0),
])
bad_records = []
def _filter(records):
for r in records:
valid = True
for key, valid_key in scheme(r).items():
if not valid_key:
ignored[key] += 1
bad_records.append(r)
# Not breaking, to count all fields with errors
valid = False
if valid:
yield r
else:
ignored['all'] += 1
return list(_filter(records)), ignored, bad_records | [
"def",
"filter_record",
"(",
"records",
")",
":",
"def",
"scheme",
"(",
"r",
")",
":",
"if",
"r",
".",
"interaction",
"is",
"None",
":",
"call_duration_ok",
"=",
"True",
"elif",
"r",
".",
"interaction",
"==",
"'call'",
":",
"call_duration_ok",
"=",
"isin... | Filter records and remove items with missing or inconsistent fields
Parameters
----------
records : list
A list of Record objects
Returns
-------
records, ignored : (Record list, dict)
A tuple of filtered records, and a dictionary counting the
missings fields | [
"Filter",
"records",
"and",
"remove",
"items",
"with",
"missing",
"or",
"inconsistent",
"fields"
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L204-L269 | train | 23,257 |
yvesalexandre/bandicoot | bandicoot/io.py | read_csv | def read_csv(user_id, records_path, antennas_path=None, attributes_path=None,
recharges_path=None, network=False, duration_format='seconds',
describe=True, warnings=True, errors=False, drop_duplicates=False):
"""
Load user records from a CSV file.
Parameters
----------
user_id : str
ID of the user (filename)
records_path : str
Path of the directory all the user files.
antennas_path : str, optional
Path of the CSV file containing (place_id, latitude, longitude) values.
This allows antennas to be mapped to their locations.
recharges_path : str, optional
Path of the directory containing recharges files
(``datetime, amount, balance, retailer_id`` CSV file).
antennas_path : str, optional
Path of the CSV file containing (place_id, latitude, longitude) values.
This allows antennas to be mapped to their locations.
network : bool, optional
If network is True, bandicoot loads the network of the user's
correspondants from the same path. Defaults to False.
duration_format : str, default is 'seconds'
Allows reading records with call duration specified in other formats
than seconds. Options are 'seconds' or any format such as '%H:%M:%S',
'%M%S', etc.
describe : boolean
If describe is True, it will print a description of the loaded user
to the standard output.
errors : boolean
If errors is True, returns a tuple (user, errors), where user is the
user object and errors are the records which could not be loaded.
drop_duplicates : boolean
If drop_duplicates, remove "duplicated records" (same correspondants,
direction, date and time). Not activated by default.
Examples
--------
>>> user = bandicoot.read_csv('sample_records', '.')
>>> print len(user.records)
10
>>> user = bandicoot.read_csv('sample_records', 'samples', 'sample_places.csv')
>>> print len(user.antennas)
5
>>> user = bandicoot.read_csv('sample_records', '.', None, 'sample_attributes.csv')
>>> print user.attributes['age']
25
Notes
-----
- The csv files can be single, or double quoted if needed.
- Empty cells are filled with ``None``. For example, if the column
``call_duration`` is empty for one record, its value will be ``None``.
Other values such as ``"N/A"``, ``"None"``, ``"null"`` will be
considered as a text.
"""
antennas = None
if antennas_path is not None:
try:
with open(antennas_path, 'r') as csv_file:
reader = csv.DictReader(csv_file)
antennas = dict((d['antenna_id'], (float(d['latitude']),
float(d['longitude'])))
for d in reader)
except IOError:
pass
user_records = os.path.join(records_path, user_id + '.csv')
with open(user_records, 'r') as csv_file:
reader = csv.DictReader(csv_file)
records = [_parse_record(r, duration_format) for r in reader]
attributes = None
if attributes_path is not None:
user_attributes = os.path.join(attributes_path, user_id + '.csv')
attributes = _load_attributes(user_attributes)
recharges = None
if recharges_path is not None:
user_recharges = os.path.join(recharges_path, user_id + '.csv')
recharges = _load_recharges(user_recharges)
user, bad_records = load(user_id, records, antennas, attributes, recharges,
antennas_path, attributes_path, recharges_path,
describe=False, warnings=warnings,
drop_duplicates=drop_duplicates)
# Loads the network
if network is True:
user.network = _read_network(user, records_path, attributes_path,
read_csv, antennas_path, warnings,
drop_duplicates=drop_duplicates)
user.recompute_missing_neighbors()
if describe:
user.describe()
if errors:
return user, bad_records
return user | python | def read_csv(user_id, records_path, antennas_path=None, attributes_path=None,
recharges_path=None, network=False, duration_format='seconds',
describe=True, warnings=True, errors=False, drop_duplicates=False):
"""
Load user records from a CSV file.
Parameters
----------
user_id : str
ID of the user (filename)
records_path : str
Path of the directory all the user files.
antennas_path : str, optional
Path of the CSV file containing (place_id, latitude, longitude) values.
This allows antennas to be mapped to their locations.
recharges_path : str, optional
Path of the directory containing recharges files
(``datetime, amount, balance, retailer_id`` CSV file).
antennas_path : str, optional
Path of the CSV file containing (place_id, latitude, longitude) values.
This allows antennas to be mapped to their locations.
network : bool, optional
If network is True, bandicoot loads the network of the user's
correspondants from the same path. Defaults to False.
duration_format : str, default is 'seconds'
Allows reading records with call duration specified in other formats
than seconds. Options are 'seconds' or any format such as '%H:%M:%S',
'%M%S', etc.
describe : boolean
If describe is True, it will print a description of the loaded user
to the standard output.
errors : boolean
If errors is True, returns a tuple (user, errors), where user is the
user object and errors are the records which could not be loaded.
drop_duplicates : boolean
If drop_duplicates, remove "duplicated records" (same correspondants,
direction, date and time). Not activated by default.
Examples
--------
>>> user = bandicoot.read_csv('sample_records', '.')
>>> print len(user.records)
10
>>> user = bandicoot.read_csv('sample_records', 'samples', 'sample_places.csv')
>>> print len(user.antennas)
5
>>> user = bandicoot.read_csv('sample_records', '.', None, 'sample_attributes.csv')
>>> print user.attributes['age']
25
Notes
-----
- The csv files can be single, or double quoted if needed.
- Empty cells are filled with ``None``. For example, if the column
``call_duration`` is empty for one record, its value will be ``None``.
Other values such as ``"N/A"``, ``"None"``, ``"null"`` will be
considered as a text.
"""
antennas = None
if antennas_path is not None:
try:
with open(antennas_path, 'r') as csv_file:
reader = csv.DictReader(csv_file)
antennas = dict((d['antenna_id'], (float(d['latitude']),
float(d['longitude'])))
for d in reader)
except IOError:
pass
user_records = os.path.join(records_path, user_id + '.csv')
with open(user_records, 'r') as csv_file:
reader = csv.DictReader(csv_file)
records = [_parse_record(r, duration_format) for r in reader]
attributes = None
if attributes_path is not None:
user_attributes = os.path.join(attributes_path, user_id + '.csv')
attributes = _load_attributes(user_attributes)
recharges = None
if recharges_path is not None:
user_recharges = os.path.join(recharges_path, user_id + '.csv')
recharges = _load_recharges(user_recharges)
user, bad_records = load(user_id, records, antennas, attributes, recharges,
antennas_path, attributes_path, recharges_path,
describe=False, warnings=warnings,
drop_duplicates=drop_duplicates)
# Loads the network
if network is True:
user.network = _read_network(user, records_path, attributes_path,
read_csv, antennas_path, warnings,
drop_duplicates=drop_duplicates)
user.recompute_missing_neighbors()
if describe:
user.describe()
if errors:
return user, bad_records
return user | [
"def",
"read_csv",
"(",
"user_id",
",",
"records_path",
",",
"antennas_path",
"=",
"None",
",",
"attributes_path",
"=",
"None",
",",
"recharges_path",
"=",
"None",
",",
"network",
"=",
"False",
",",
"duration_format",
"=",
"'seconds'",
",",
"describe",
"=",
... | Load user records from a CSV file.
Parameters
----------
user_id : str
ID of the user (filename)
records_path : str
Path of the directory all the user files.
antennas_path : str, optional
Path of the CSV file containing (place_id, latitude, longitude) values.
This allows antennas to be mapped to their locations.
recharges_path : str, optional
Path of the directory containing recharges files
(``datetime, amount, balance, retailer_id`` CSV file).
antennas_path : str, optional
Path of the CSV file containing (place_id, latitude, longitude) values.
This allows antennas to be mapped to their locations.
network : bool, optional
If network is True, bandicoot loads the network of the user's
correspondants from the same path. Defaults to False.
duration_format : str, default is 'seconds'
Allows reading records with call duration specified in other formats
than seconds. Options are 'seconds' or any format such as '%H:%M:%S',
'%M%S', etc.
describe : boolean
If describe is True, it will print a description of the loaded user
to the standard output.
errors : boolean
If errors is True, returns a tuple (user, errors), where user is the
user object and errors are the records which could not be loaded.
drop_duplicates : boolean
If drop_duplicates, remove "duplicated records" (same correspondants,
direction, date and time). Not activated by default.
Examples
--------
>>> user = bandicoot.read_csv('sample_records', '.')
>>> print len(user.records)
10
>>> user = bandicoot.read_csv('sample_records', 'samples', 'sample_places.csv')
>>> print len(user.antennas)
5
>>> user = bandicoot.read_csv('sample_records', '.', None, 'sample_attributes.csv')
>>> print user.attributes['age']
25
Notes
-----
- The csv files can be single, or double quoted if needed.
- Empty cells are filled with ``None``. For example, if the column
``call_duration`` is empty for one record, its value will be ``None``.
Other values such as ``"N/A"``, ``"None"``, ``"null"`` will be
considered as a text. | [
"Load",
"user",
"records",
"from",
"a",
"CSV",
"file",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/io.py#L488-L604 | train | 23,258 |
yvesalexandre/bandicoot | bandicoot/individual.py | interevent_time | def interevent_time(records):
"""
The interevent time between two records of the user.
"""
inter_events = pairwise(r.datetime for r in records)
inter = [(new - old).total_seconds() for old, new in inter_events]
return summary_stats(inter) | python | def interevent_time(records):
"""
The interevent time between two records of the user.
"""
inter_events = pairwise(r.datetime for r in records)
inter = [(new - old).total_seconds() for old, new in inter_events]
return summary_stats(inter) | [
"def",
"interevent_time",
"(",
"records",
")",
":",
"inter_events",
"=",
"pairwise",
"(",
"r",
".",
"datetime",
"for",
"r",
"in",
"records",
")",
"inter",
"=",
"[",
"(",
"new",
"-",
"old",
")",
".",
"total_seconds",
"(",
")",
"for",
"old",
",",
"new"... | The interevent time between two records of the user. | [
"The",
"interevent",
"time",
"between",
"two",
"records",
"of",
"the",
"user",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L36-L43 | train | 23,259 |
yvesalexandre/bandicoot | bandicoot/individual.py | number_of_contacts | def number_of_contacts(records, direction=None, more=0):
"""
The number of contacts the user interacted with.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
more : int, default is 0
Counts only contacts with more than this number of interactions.
"""
if direction is None:
counter = Counter(r.correspondent_id for r in records)
else:
counter = Counter(r.correspondent_id for r in records if r.direction == direction)
return sum(1 for d in counter.values() if d > more) | python | def number_of_contacts(records, direction=None, more=0):
"""
The number of contacts the user interacted with.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
more : int, default is 0
Counts only contacts with more than this number of interactions.
"""
if direction is None:
counter = Counter(r.correspondent_id for r in records)
else:
counter = Counter(r.correspondent_id for r in records if r.direction == direction)
return sum(1 for d in counter.values() if d > more) | [
"def",
"number_of_contacts",
"(",
"records",
",",
"direction",
"=",
"None",
",",
"more",
"=",
"0",
")",
":",
"if",
"direction",
"is",
"None",
":",
"counter",
"=",
"Counter",
"(",
"r",
".",
"correspondent_id",
"for",
"r",
"in",
"records",
")",
"else",
"... | The number of contacts the user interacted with.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
more : int, default is 0
Counts only contacts with more than this number of interactions. | [
"The",
"number",
"of",
"contacts",
"the",
"user",
"interacted",
"with",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L47-L63 | train | 23,260 |
yvesalexandre/bandicoot | bandicoot/individual.py | entropy_of_contacts | def entropy_of_contacts(records, normalize=False):
"""
The entropy of the user's contacts.
Parameters
----------
normalize: boolean, default is False
Returns a normalized entropy between 0 and 1.
"""
counter = Counter(r.correspondent_id for r in records)
raw_entropy = entropy(counter.values())
n = len(counter)
if normalize and n > 1:
return raw_entropy / math.log(n)
else:
return raw_entropy | python | def entropy_of_contacts(records, normalize=False):
"""
The entropy of the user's contacts.
Parameters
----------
normalize: boolean, default is False
Returns a normalized entropy between 0 and 1.
"""
counter = Counter(r.correspondent_id for r in records)
raw_entropy = entropy(counter.values())
n = len(counter)
if normalize and n > 1:
return raw_entropy / math.log(n)
else:
return raw_entropy | [
"def",
"entropy_of_contacts",
"(",
"records",
",",
"normalize",
"=",
"False",
")",
":",
"counter",
"=",
"Counter",
"(",
"r",
".",
"correspondent_id",
"for",
"r",
"in",
"records",
")",
"raw_entropy",
"=",
"entropy",
"(",
"counter",
".",
"values",
"(",
")",
... | The entropy of the user's contacts.
Parameters
----------
normalize: boolean, default is False
Returns a normalized entropy between 0 and 1. | [
"The",
"entropy",
"of",
"the",
"user",
"s",
"contacts",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L67-L84 | train | 23,261 |
yvesalexandre/bandicoot | bandicoot/individual.py | interactions_per_contact | def interactions_per_contact(records, direction=None):
"""
The number of interactions a user had with each of its contacts.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
"""
if direction is None:
counter = Counter(r.correspondent_id for r in records)
else:
counter = Counter(r.correspondent_id for r in records
if r.direction == direction)
return summary_stats(counter.values()) | python | def interactions_per_contact(records, direction=None):
"""
The number of interactions a user had with each of its contacts.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
"""
if direction is None:
counter = Counter(r.correspondent_id for r in records)
else:
counter = Counter(r.correspondent_id for r in records
if r.direction == direction)
return summary_stats(counter.values()) | [
"def",
"interactions_per_contact",
"(",
"records",
",",
"direction",
"=",
"None",
")",
":",
"if",
"direction",
"is",
"None",
":",
"counter",
"=",
"Counter",
"(",
"r",
".",
"correspondent_id",
"for",
"r",
"in",
"records",
")",
"else",
":",
"counter",
"=",
... | The number of interactions a user had with each of its contacts.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing. | [
"The",
"number",
"of",
"interactions",
"a",
"user",
"had",
"with",
"each",
"of",
"its",
"contacts",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L88-L103 | train | 23,262 |
yvesalexandre/bandicoot | bandicoot/individual.py | percent_initiated_interactions | def percent_initiated_interactions(records, user):
"""
The percentage of calls initiated by the user.
"""
if len(records) == 0:
return 0
initiated = sum(1 for r in records if r.direction == 'out')
return initiated / len(records) | python | def percent_initiated_interactions(records, user):
"""
The percentage of calls initiated by the user.
"""
if len(records) == 0:
return 0
initiated = sum(1 for r in records if r.direction == 'out')
return initiated / len(records) | [
"def",
"percent_initiated_interactions",
"(",
"records",
",",
"user",
")",
":",
"if",
"len",
"(",
"records",
")",
"==",
"0",
":",
"return",
"0",
"initiated",
"=",
"sum",
"(",
"1",
"for",
"r",
"in",
"records",
"if",
"r",
".",
"direction",
"==",
"'out'",... | The percentage of calls initiated by the user. | [
"The",
"percentage",
"of",
"calls",
"initiated",
"by",
"the",
"user",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L107-L115 | train | 23,263 |
yvesalexandre/bandicoot | bandicoot/individual.py | percent_nocturnal | def percent_nocturnal(records, user):
"""
The percentage of interactions the user had at night.
By default, nights are 7pm-7am. Nightimes can be set in
``User.night_start`` and ``User.night_end``.
"""
if len(records) == 0:
return 0
if user.night_start < user.night_end:
night_filter = lambda d: user.night_end > d.time() > user.night_start
else:
night_filter = lambda d: not(user.night_end < d.time() < user.night_start)
return sum(1 for r in records if night_filter(r.datetime)) / len(records) | python | def percent_nocturnal(records, user):
"""
The percentage of interactions the user had at night.
By default, nights are 7pm-7am. Nightimes can be set in
``User.night_start`` and ``User.night_end``.
"""
if len(records) == 0:
return 0
if user.night_start < user.night_end:
night_filter = lambda d: user.night_end > d.time() > user.night_start
else:
night_filter = lambda d: not(user.night_end < d.time() < user.night_start)
return sum(1 for r in records if night_filter(r.datetime)) / len(records) | [
"def",
"percent_nocturnal",
"(",
"records",
",",
"user",
")",
":",
"if",
"len",
"(",
"records",
")",
"==",
"0",
":",
"return",
"0",
"if",
"user",
".",
"night_start",
"<",
"user",
".",
"night_end",
":",
"night_filter",
"=",
"lambda",
"d",
":",
"user",
... | The percentage of interactions the user had at night.
By default, nights are 7pm-7am. Nightimes can be set in
``User.night_start`` and ``User.night_end``. | [
"The",
"percentage",
"of",
"interactions",
"the",
"user",
"had",
"at",
"night",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L119-L134 | train | 23,264 |
yvesalexandre/bandicoot | bandicoot/individual.py | call_duration | def call_duration(records, direction=None):
"""
The duration of the user's calls.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
"""
if direction is None:
call_durations = [r.call_duration for r in records]
else:
call_durations = [r.call_duration for r in records if r.direction == direction]
return summary_stats(call_durations) | python | def call_duration(records, direction=None):
"""
The duration of the user's calls.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
"""
if direction is None:
call_durations = [r.call_duration for r in records]
else:
call_durations = [r.call_duration for r in records if r.direction == direction]
return summary_stats(call_durations) | [
"def",
"call_duration",
"(",
"records",
",",
"direction",
"=",
"None",
")",
":",
"if",
"direction",
"is",
"None",
":",
"call_durations",
"=",
"[",
"r",
".",
"call_duration",
"for",
"r",
"in",
"records",
"]",
"else",
":",
"call_durations",
"=",
"[",
"r",
... | The duration of the user's calls.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing. | [
"The",
"duration",
"of",
"the",
"user",
"s",
"calls",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L138-L153 | train | 23,265 |
yvesalexandre/bandicoot | bandicoot/individual.py | _conversations | def _conversations(group, delta=datetime.timedelta(hours=1)):
"""
Group texts into conversations. The function returns an iterator over
records grouped by conversations.
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations.
A conversation begins when one person sends a text-message to the other and
ends when one of them makes a phone call or there is no activity between
them for an hour.
"""
last_time = None
results = []
for g in group:
if last_time is None or g.datetime - last_time < delta:
if g.interaction == 'text':
results.append(g)
# A call always ends a conversation
else:
if len(results) != 0:
yield results
results = []
else:
if len(results) != 0:
yield results
if g.interaction == 'call':
results = []
else:
results = [g]
last_time = g.datetime
if len(results) != 0:
yield results | python | def _conversations(group, delta=datetime.timedelta(hours=1)):
"""
Group texts into conversations. The function returns an iterator over
records grouped by conversations.
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations.
A conversation begins when one person sends a text-message to the other and
ends when one of them makes a phone call or there is no activity between
them for an hour.
"""
last_time = None
results = []
for g in group:
if last_time is None or g.datetime - last_time < delta:
if g.interaction == 'text':
results.append(g)
# A call always ends a conversation
else:
if len(results) != 0:
yield results
results = []
else:
if len(results) != 0:
yield results
if g.interaction == 'call':
results = []
else:
results = [g]
last_time = g.datetime
if len(results) != 0:
yield results | [
"def",
"_conversations",
"(",
"group",
",",
"delta",
"=",
"datetime",
".",
"timedelta",
"(",
"hours",
"=",
"1",
")",
")",
":",
"last_time",
"=",
"None",
"results",
"=",
"[",
"]",
"for",
"g",
"in",
"group",
":",
"if",
"last_time",
"is",
"None",
"or",
... | Group texts into conversations. The function returns an iterator over
records grouped by conversations.
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations.
A conversation begins when one person sends a text-message to the other and
ends when one of them makes a phone call or there is no activity between
them for an hour. | [
"Group",
"texts",
"into",
"conversations",
".",
"The",
"function",
"returns",
"an",
"iterator",
"over",
"records",
"grouped",
"by",
"conversations",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L156-L194 | train | 23,266 |
yvesalexandre/bandicoot | bandicoot/individual.py | percent_initiated_conversations | def percent_initiated_conversations(records):
"""
The percentage of conversations that have been initiated by the user.
Each call and each text conversation is weighted as a single interaction.
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations.
"""
interactions = defaultdict(list)
for r in records:
interactions[r.correspondent_id].append(r)
def _percent_initiated(grouped):
mapped = [(1 if conv[0].direction == 'out' else 0, 1)
for conv in _conversations(grouped)]
return mapped
all_couples = [sublist for i in interactions.values()
for sublist in _percent_initiated(i)]
if len(all_couples) == 0:
init, total = 0, 0
else:
init, total = list(map(sum, list(zip(*all_couples))))
return init / total if total != 0 else 0 | python | def percent_initiated_conversations(records):
"""
The percentage of conversations that have been initiated by the user.
Each call and each text conversation is weighted as a single interaction.
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations.
"""
interactions = defaultdict(list)
for r in records:
interactions[r.correspondent_id].append(r)
def _percent_initiated(grouped):
mapped = [(1 if conv[0].direction == 'out' else 0, 1)
for conv in _conversations(grouped)]
return mapped
all_couples = [sublist for i in interactions.values()
for sublist in _percent_initiated(i)]
if len(all_couples) == 0:
init, total = 0, 0
else:
init, total = list(map(sum, list(zip(*all_couples))))
return init / total if total != 0 else 0 | [
"def",
"percent_initiated_conversations",
"(",
"records",
")",
":",
"interactions",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"r",
"in",
"records",
":",
"interactions",
"[",
"r",
".",
"correspondent_id",
"]",
".",
"append",
"(",
"r",
")",
"def",
"_percent... | The percentage of conversations that have been initiated by the user.
Each call and each text conversation is weighted as a single interaction.
See :ref:`Using bandicoot <conversations-label>` for a definition of
conversations. | [
"The",
"percentage",
"of",
"conversations",
"that",
"have",
"been",
"initiated",
"by",
"the",
"user",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L290-L316 | train | 23,267 |
yvesalexandre/bandicoot | bandicoot/individual.py | active_days | def active_days(records):
"""
The number of days during which the user was active. A user is considered
active if he sends a text, receives a text, initiates a call, receives a
call, or has a mobility point.
"""
days = set(r.datetime.date() for r in records)
return len(days) | python | def active_days(records):
"""
The number of days during which the user was active. A user is considered
active if he sends a text, receives a text, initiates a call, receives a
call, or has a mobility point.
"""
days = set(r.datetime.date() for r in records)
return len(days) | [
"def",
"active_days",
"(",
"records",
")",
":",
"days",
"=",
"set",
"(",
"r",
".",
"datetime",
".",
"date",
"(",
")",
"for",
"r",
"in",
"records",
")",
"return",
"len",
"(",
"days",
")"
] | The number of days during which the user was active. A user is considered
active if he sends a text, receives a text, initiates a call, receives a
call, or has a mobility point. | [
"The",
"number",
"of",
"days",
"during",
"which",
"the",
"user",
"was",
"active",
".",
"A",
"user",
"is",
"considered",
"active",
"if",
"he",
"sends",
"a",
"text",
"receives",
"a",
"text",
"initiates",
"a",
"call",
"receives",
"a",
"call",
"or",
"has",
... | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L320-L327 | train | 23,268 |
yvesalexandre/bandicoot | bandicoot/individual.py | percent_pareto_interactions | def percent_pareto_interactions(records, percentage=0.8):
"""
The percentage of user's contacts that account for 80% of its interactions.
"""
if len(records) == 0:
return None
user_count = Counter(r.correspondent_id for r in records)
target = int(math.ceil(sum(user_count.values()) * percentage))
user_sort = sorted(user_count.keys(), key=lambda x: user_count[x])
while target > 0 and len(user_sort) > 0:
user_id = user_sort.pop()
target -= user_count[user_id]
return (len(user_count) - len(user_sort)) / len(records) | python | def percent_pareto_interactions(records, percentage=0.8):
"""
The percentage of user's contacts that account for 80% of its interactions.
"""
if len(records) == 0:
return None
user_count = Counter(r.correspondent_id for r in records)
target = int(math.ceil(sum(user_count.values()) * percentage))
user_sort = sorted(user_count.keys(), key=lambda x: user_count[x])
while target > 0 and len(user_sort) > 0:
user_id = user_sort.pop()
target -= user_count[user_id]
return (len(user_count) - len(user_sort)) / len(records) | [
"def",
"percent_pareto_interactions",
"(",
"records",
",",
"percentage",
"=",
"0.8",
")",
":",
"if",
"len",
"(",
"records",
")",
"==",
"0",
":",
"return",
"None",
"user_count",
"=",
"Counter",
"(",
"r",
".",
"correspondent_id",
"for",
"r",
"in",
"records",... | The percentage of user's contacts that account for 80% of its interactions. | [
"The",
"percentage",
"of",
"user",
"s",
"contacts",
"that",
"account",
"for",
"80%",
"of",
"its",
"interactions",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L331-L347 | train | 23,269 |
yvesalexandre/bandicoot | bandicoot/individual.py | number_of_interactions | def number_of_interactions(records, direction=None):
"""
The number of interactions.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
"""
if direction is None:
return len(records)
else:
return len([r for r in records if r.direction == direction]) | python | def number_of_interactions(records, direction=None):
"""
The number of interactions.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
"""
if direction is None:
return len(records)
else:
return len([r for r in records if r.direction == direction]) | [
"def",
"number_of_interactions",
"(",
"records",
",",
"direction",
"=",
"None",
")",
":",
"if",
"direction",
"is",
"None",
":",
"return",
"len",
"(",
"records",
")",
"else",
":",
"return",
"len",
"(",
"[",
"r",
"for",
"r",
"in",
"records",
"if",
"r",
... | The number of interactions.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing. | [
"The",
"number",
"of",
"interactions",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L409-L422 | train | 23,270 |
yvesalexandre/bandicoot | bandicoot/weekmatrix.py | to_csv | def to_csv(weekmatrices, filename, digits=5):
"""
Exports a list of week-matrices to a specified filename in the CSV format.
Parameters
----------
weekmatrices : list
The week-matrices to export.
filename : string
Path for the exported CSV file.
"""
with open(filename, 'w') as f:
w = csv.writer(f, lineterminator='\n')
w.writerow(['year_week', 'channel', 'weekday', 'section', 'value'])
def make_repr(item):
if item is None:
return None
elif isinstance(item, float):
return repr(round(item, digits))
else:
return str(item)
for row in weekmatrices:
w.writerow([make_repr(item) for item in row]) | python | def to_csv(weekmatrices, filename, digits=5):
"""
Exports a list of week-matrices to a specified filename in the CSV format.
Parameters
----------
weekmatrices : list
The week-matrices to export.
filename : string
Path for the exported CSV file.
"""
with open(filename, 'w') as f:
w = csv.writer(f, lineterminator='\n')
w.writerow(['year_week', 'channel', 'weekday', 'section', 'value'])
def make_repr(item):
if item is None:
return None
elif isinstance(item, float):
return repr(round(item, digits))
else:
return str(item)
for row in weekmatrices:
w.writerow([make_repr(item) for item in row]) | [
"def",
"to_csv",
"(",
"weekmatrices",
",",
"filename",
",",
"digits",
"=",
"5",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"w",
"=",
"csv",
".",
"writer",
"(",
"f",
",",
"lineterminator",
"=",
"'\\n'",
")",
"w",
"... | Exports a list of week-matrices to a specified filename in the CSV format.
Parameters
----------
weekmatrices : list
The week-matrices to export.
filename : string
Path for the exported CSV file. | [
"Exports",
"a",
"list",
"of",
"week",
"-",
"matrices",
"to",
"a",
"specified",
"filename",
"in",
"the",
"CSV",
"format",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/weekmatrix.py#L105-L130 | train | 23,271 |
yvesalexandre/bandicoot | bandicoot/weekmatrix.py | read_csv | def read_csv(filename):
"""
Read a list of week-matrices from a CSV file.
"""
with open(filename, 'r') as f:
r = csv.reader(f)
next(r) # remove header
wm = list(r)
# remove header and convert to numeric
for i, row in enumerate(wm):
row[1:4] = map(int, row[1:4])
row[4] = float(row[4])
return wm | python | def read_csv(filename):
"""
Read a list of week-matrices from a CSV file.
"""
with open(filename, 'r') as f:
r = csv.reader(f)
next(r) # remove header
wm = list(r)
# remove header and convert to numeric
for i, row in enumerate(wm):
row[1:4] = map(int, row[1:4])
row[4] = float(row[4])
return wm | [
"def",
"read_csv",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"f",
":",
"r",
"=",
"csv",
".",
"reader",
"(",
"f",
")",
"next",
"(",
"r",
")",
"# remove header",
"wm",
"=",
"list",
"(",
"r",
")",
"# remove h... | Read a list of week-matrices from a CSV file. | [
"Read",
"a",
"list",
"of",
"week",
"-",
"matrices",
"from",
"a",
"CSV",
"file",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/weekmatrix.py#L133-L148 | train | 23,272 |
yvesalexandre/bandicoot | bandicoot/weekmatrix.py | _extract_list_from_generator | def _extract_list_from_generator(generator):
"""
Iterates over a generator to extract all the objects and add them to a list.
Useful when the objects have to be used multiple times.
"""
extracted = []
for i in generator:
extracted.append(list(i))
return extracted | python | def _extract_list_from_generator(generator):
"""
Iterates over a generator to extract all the objects and add them to a list.
Useful when the objects have to be used multiple times.
"""
extracted = []
for i in generator:
extracted.append(list(i))
return extracted | [
"def",
"_extract_list_from_generator",
"(",
"generator",
")",
":",
"extracted",
"=",
"[",
"]",
"for",
"i",
"in",
"generator",
":",
"extracted",
".",
"append",
"(",
"list",
"(",
"i",
")",
")",
"return",
"extracted"
] | Iterates over a generator to extract all the objects and add them to a list.
Useful when the objects have to be used multiple times. | [
"Iterates",
"over",
"a",
"generator",
"to",
"extract",
"all",
"the",
"objects",
"and",
"add",
"them",
"to",
"a",
"list",
".",
"Useful",
"when",
"the",
"objects",
"have",
"to",
"be",
"used",
"multiple",
"times",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/weekmatrix.py#L310-L319 | train | 23,273 |
yvesalexandre/bandicoot | bandicoot/weekmatrix.py | _seconds_to_section_split | def _seconds_to_section_split(record, sections):
"""
Finds the seconds to the next section from the datetime of a record.
"""
next_section = sections[
bisect_right(sections, _find_weektime(record.datetime))] * 60
return next_section - _find_weektime(record.datetime, time_type='sec') | python | def _seconds_to_section_split(record, sections):
"""
Finds the seconds to the next section from the datetime of a record.
"""
next_section = sections[
bisect_right(sections, _find_weektime(record.datetime))] * 60
return next_section - _find_weektime(record.datetime, time_type='sec') | [
"def",
"_seconds_to_section_split",
"(",
"record",
",",
"sections",
")",
":",
"next_section",
"=",
"sections",
"[",
"bisect_right",
"(",
"sections",
",",
"_find_weektime",
"(",
"record",
".",
"datetime",
")",
")",
"]",
"*",
"60",
"return",
"next_section",
"-",... | Finds the seconds to the next section from the datetime of a record. | [
"Finds",
"the",
"seconds",
"to",
"the",
"next",
"section",
"from",
"the",
"datetime",
"of",
"a",
"record",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/weekmatrix.py#L322-L329 | train | 23,274 |
yvesalexandre/bandicoot | bandicoot/helper/stops.py | get_neighbors | def get_neighbors(distance_matrix, source, eps):
"""
Given a matrix of distance between couples of points,
return the list of every point closer than eps from a certain point.
"""
return [dest for dest, distance in enumerate(distance_matrix[source]) if distance < eps] | python | def get_neighbors(distance_matrix, source, eps):
"""
Given a matrix of distance between couples of points,
return the list of every point closer than eps from a certain point.
"""
return [dest for dest, distance in enumerate(distance_matrix[source]) if distance < eps] | [
"def",
"get_neighbors",
"(",
"distance_matrix",
",",
"source",
",",
"eps",
")",
":",
"return",
"[",
"dest",
"for",
"dest",
",",
"distance",
"in",
"enumerate",
"(",
"distance_matrix",
"[",
"source",
"]",
")",
"if",
"distance",
"<",
"eps",
"]"
] | Given a matrix of distance between couples of points,
return the list of every point closer than eps from a certain point. | [
"Given",
"a",
"matrix",
"of",
"distance",
"between",
"couples",
"of",
"points",
"return",
"the",
"list",
"of",
"every",
"point",
"closer",
"than",
"eps",
"from",
"a",
"certain",
"point",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/stops.py#L37-L43 | train | 23,275 |
yvesalexandre/bandicoot | bandicoot/helper/stops.py | fix_location | def fix_location(records, max_elapsed_seconds=300):
"""
Update position of all records based on the position of
the closest GPS record.
.. note:: Use this function when call and text records are missing a
location, but you have access to accurate GPS traces.
"""
groups = itertools.groupby(records, lambda r: r.direction)
groups = [(interaction, list(g)) for interaction, g in groups]
def tdist(t1, t2):
return abs((t1 - t2).total_seconds())
for i, (interaction, g) in enumerate(groups):
if interaction == 'in':
continue
prev_gps = groups[i-1][1][-1]
next_gps = groups[i+1][1][0]
for r in g:
if tdist(r.datetime, prev_gps.datetime) <= max_elapsed_seconds:
r.position = prev_gps.position
elif tdist(r.datetime, next_gps.datetime) <= max_elapsed_seconds:
r.position = next_gps.position | python | def fix_location(records, max_elapsed_seconds=300):
"""
Update position of all records based on the position of
the closest GPS record.
.. note:: Use this function when call and text records are missing a
location, but you have access to accurate GPS traces.
"""
groups = itertools.groupby(records, lambda r: r.direction)
groups = [(interaction, list(g)) for interaction, g in groups]
def tdist(t1, t2):
return abs((t1 - t2).total_seconds())
for i, (interaction, g) in enumerate(groups):
if interaction == 'in':
continue
prev_gps = groups[i-1][1][-1]
next_gps = groups[i+1][1][0]
for r in g:
if tdist(r.datetime, prev_gps.datetime) <= max_elapsed_seconds:
r.position = prev_gps.position
elif tdist(r.datetime, next_gps.datetime) <= max_elapsed_seconds:
r.position = next_gps.position | [
"def",
"fix_location",
"(",
"records",
",",
"max_elapsed_seconds",
"=",
"300",
")",
":",
"groups",
"=",
"itertools",
".",
"groupby",
"(",
"records",
",",
"lambda",
"r",
":",
"r",
".",
"direction",
")",
"groups",
"=",
"[",
"(",
"interaction",
",",
"list",... | Update position of all records based on the position of
the closest GPS record.
.. note:: Use this function when call and text records are missing a
location, but you have access to accurate GPS traces. | [
"Update",
"position",
"of",
"all",
"records",
"based",
"on",
"the",
"position",
"of",
"the",
"closest",
"GPS",
"record",
"."
] | 73a658f6f17331541cf0b1547028db9b70e8d58a | https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/stops.py#L174-L200 | train | 23,276 |
wbond/certvalidator | certvalidator/ocsp_client.py | fetch | def fetch(cert, issuer, hash_algo='sha1', nonce=True, user_agent=None, timeout=10):
"""
Fetches an OCSP response for a certificate
:param cert:
An asn1cyrpto.x509.Certificate object to get an OCSP reponse for
:param issuer:
An asn1crypto.x509.Certificate object that is the issuer of cert
:param hash_algo:
A unicode string of "sha1" or "sha256"
:param nonce:
A boolean - if the nonce extension should be used to prevent replay
attacks
:param user_agent:
The HTTP user agent to use when requesting the OCSP response. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
An asn1crypto.ocsp.OCSPResponse object
"""
if not isinstance(cert, x509.Certificate):
raise TypeError('cert must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(cert))
if not isinstance(issuer, x509.Certificate):
raise TypeError('issuer must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(issuer))
if hash_algo not in set(['sha1', 'sha256']):
raise ValueError('hash_algo must be one of "sha1", "sha256", not %s' % repr(hash_algo))
if not isinstance(nonce, bool):
raise TypeError('nonce must be a bool, not %s' % type_name(nonce))
if user_agent is None:
user_agent = 'certvalidator %s' % __version__
elif not isinstance(user_agent, str_cls):
raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))
cert_id = ocsp.CertId({
'hash_algorithm': algos.DigestAlgorithm({'algorithm': hash_algo}),
'issuer_name_hash': getattr(cert.issuer, hash_algo),
'issuer_key_hash': getattr(issuer.public_key, hash_algo),
'serial_number': cert.serial_number,
})
request = ocsp.Request({
'req_cert': cert_id,
})
tbs_request = ocsp.TBSRequest({
'request_list': ocsp.Requests([request]),
})
if nonce:
nonce_extension = ocsp.TBSRequestExtension({
'extn_id': 'nonce',
'critical': False,
'extn_value': core.OctetString(core.OctetString(os.urandom(16)).dump())
})
tbs_request['request_extensions'] = ocsp.TBSRequestExtensions([nonce_extension])
ocsp_request = ocsp.OCSPRequest({
'tbs_request': tbs_request,
})
last_e = None
for ocsp_url in cert.ocsp_urls:
try:
request = Request(ocsp_url)
request.add_header('Accept', 'application/ocsp-response')
request.add_header('Content-Type', 'application/ocsp-request')
request.add_header('User-Agent', user_agent)
response = urlopen(request, ocsp_request.dump(), timeout)
ocsp_response = ocsp.OCSPResponse.load(response.read())
request_nonce = ocsp_request.nonce_value
response_nonce = ocsp_response.nonce_value
if request_nonce and response_nonce and request_nonce.native != response_nonce.native:
raise errors.OCSPValidationError(
'Unable to verify OCSP response since the request and response nonces do not match'
)
return ocsp_response
except (URLError) as e:
last_e = e
raise last_e | python | def fetch(cert, issuer, hash_algo='sha1', nonce=True, user_agent=None, timeout=10):
"""
Fetches an OCSP response for a certificate
:param cert:
An asn1cyrpto.x509.Certificate object to get an OCSP reponse for
:param issuer:
An asn1crypto.x509.Certificate object that is the issuer of cert
:param hash_algo:
A unicode string of "sha1" or "sha256"
:param nonce:
A boolean - if the nonce extension should be used to prevent replay
attacks
:param user_agent:
The HTTP user agent to use when requesting the OCSP response. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
An asn1crypto.ocsp.OCSPResponse object
"""
if not isinstance(cert, x509.Certificate):
raise TypeError('cert must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(cert))
if not isinstance(issuer, x509.Certificate):
raise TypeError('issuer must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(issuer))
if hash_algo not in set(['sha1', 'sha256']):
raise ValueError('hash_algo must be one of "sha1", "sha256", not %s' % repr(hash_algo))
if not isinstance(nonce, bool):
raise TypeError('nonce must be a bool, not %s' % type_name(nonce))
if user_agent is None:
user_agent = 'certvalidator %s' % __version__
elif not isinstance(user_agent, str_cls):
raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))
cert_id = ocsp.CertId({
'hash_algorithm': algos.DigestAlgorithm({'algorithm': hash_algo}),
'issuer_name_hash': getattr(cert.issuer, hash_algo),
'issuer_key_hash': getattr(issuer.public_key, hash_algo),
'serial_number': cert.serial_number,
})
request = ocsp.Request({
'req_cert': cert_id,
})
tbs_request = ocsp.TBSRequest({
'request_list': ocsp.Requests([request]),
})
if nonce:
nonce_extension = ocsp.TBSRequestExtension({
'extn_id': 'nonce',
'critical': False,
'extn_value': core.OctetString(core.OctetString(os.urandom(16)).dump())
})
tbs_request['request_extensions'] = ocsp.TBSRequestExtensions([nonce_extension])
ocsp_request = ocsp.OCSPRequest({
'tbs_request': tbs_request,
})
last_e = None
for ocsp_url in cert.ocsp_urls:
try:
request = Request(ocsp_url)
request.add_header('Accept', 'application/ocsp-response')
request.add_header('Content-Type', 'application/ocsp-request')
request.add_header('User-Agent', user_agent)
response = urlopen(request, ocsp_request.dump(), timeout)
ocsp_response = ocsp.OCSPResponse.load(response.read())
request_nonce = ocsp_request.nonce_value
response_nonce = ocsp_response.nonce_value
if request_nonce and response_nonce and request_nonce.native != response_nonce.native:
raise errors.OCSPValidationError(
'Unable to verify OCSP response since the request and response nonces do not match'
)
return ocsp_response
except (URLError) as e:
last_e = e
raise last_e | [
"def",
"fetch",
"(",
"cert",
",",
"issuer",
",",
"hash_algo",
"=",
"'sha1'",
",",
"nonce",
"=",
"True",
",",
"user_agent",
"=",
"None",
",",
"timeout",
"=",
"10",
")",
":",
"if",
"not",
"isinstance",
"(",
"cert",
",",
"x509",
".",
"Certificate",
")",... | Fetches an OCSP response for a certificate
:param cert:
An asn1cyrpto.x509.Certificate object to get an OCSP reponse for
:param issuer:
An asn1crypto.x509.Certificate object that is the issuer of cert
:param hash_algo:
A unicode string of "sha1" or "sha256"
:param nonce:
A boolean - if the nonce extension should be used to prevent replay
attacks
:param user_agent:
The HTTP user agent to use when requesting the OCSP response. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
An asn1crypto.ocsp.OCSPResponse object | [
"Fetches",
"an",
"OCSP",
"response",
"for",
"a",
"certificate"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/ocsp_client.py#L14-L109 | train | 23,277 |
wbond/certvalidator | certvalidator/registry.py | CertificateRegistry._walk_issuers | def _walk_issuers(self, path, paths, failed_paths):
"""
Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list
"""
if path.first.signature in self._ca_lookup:
paths.append(path)
return
new_branches = 0
for issuer in self._possible_issuers(path.first):
try:
self._walk_issuers(path.copy().prepend(issuer), paths, failed_paths)
new_branches += 1
except (DuplicateCertificateError):
pass
if not new_branches:
failed_paths.append(path) | python | def _walk_issuers(self, path, paths, failed_paths):
"""
Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list
"""
if path.first.signature in self._ca_lookup:
paths.append(path)
return
new_branches = 0
for issuer in self._possible_issuers(path.first):
try:
self._walk_issuers(path.copy().prepend(issuer), paths, failed_paths)
new_branches += 1
except (DuplicateCertificateError):
pass
if not new_branches:
failed_paths.append(path) | [
"def",
"_walk_issuers",
"(",
"self",
",",
"path",
",",
"paths",
",",
"failed_paths",
")",
":",
"if",
"path",
".",
"first",
".",
"signature",
"in",
"self",
".",
"_ca_lookup",
":",
"paths",
".",
"append",
"(",
"path",
")",
"return",
"new_branches",
"=",
... | Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list | [
"Recursively",
"looks",
"through",
"the",
"list",
"of",
"known",
"certificates",
"for",
"the",
"issuer",
"of",
"the",
"certificate",
"specified",
"stopping",
"once",
"the",
"certificate",
"in",
"question",
"is",
"one",
"contained",
"within",
"the",
"CA",
"certs"... | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/registry.py#L325-L358 | train | 23,278 |
wbond/certvalidator | certvalidator/registry.py | CertificateRegistry._possible_issuers | def _possible_issuers(self, cert):
"""
Returns a generator that will list all possible issuers for the cert
:param cert:
An asn1crypto.x509.Certificate object to find the issuer of
"""
issuer_hashable = cert.issuer.hashable
if issuer_hashable not in self._subject_map:
return
for issuer in self._subject_map[issuer_hashable]:
# Info from the authority key identifier extension can be used to
# eliminate possible options when multiple keys with the same
# subject exist, such as during a transition, or with cross-signing.
if cert.authority_key_identifier and issuer.key_identifier:
if cert.authority_key_identifier != issuer.key_identifier:
continue
elif cert.authority_issuer_serial:
if cert.authority_issuer_serial != issuer.issuer_serial:
continue
yield issuer | python | def _possible_issuers(self, cert):
"""
Returns a generator that will list all possible issuers for the cert
:param cert:
An asn1crypto.x509.Certificate object to find the issuer of
"""
issuer_hashable = cert.issuer.hashable
if issuer_hashable not in self._subject_map:
return
for issuer in self._subject_map[issuer_hashable]:
# Info from the authority key identifier extension can be used to
# eliminate possible options when multiple keys with the same
# subject exist, such as during a transition, or with cross-signing.
if cert.authority_key_identifier and issuer.key_identifier:
if cert.authority_key_identifier != issuer.key_identifier:
continue
elif cert.authority_issuer_serial:
if cert.authority_issuer_serial != issuer.issuer_serial:
continue
yield issuer | [
"def",
"_possible_issuers",
"(",
"self",
",",
"cert",
")",
":",
"issuer_hashable",
"=",
"cert",
".",
"issuer",
".",
"hashable",
"if",
"issuer_hashable",
"not",
"in",
"self",
".",
"_subject_map",
":",
"return",
"for",
"issuer",
"in",
"self",
".",
"_subject_ma... | Returns a generator that will list all possible issuers for the cert
:param cert:
An asn1crypto.x509.Certificate object to find the issuer of | [
"Returns",
"a",
"generator",
"that",
"will",
"list",
"all",
"possible",
"issuers",
"for",
"the",
"cert"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/registry.py#L360-L383 | train | 23,279 |
wbond/certvalidator | certvalidator/path.py | ValidationPath.find_issuer | def find_issuer(self, cert):
"""
Return the issuer of the cert specified, as defined by this path
:param cert:
An asn1crypto.x509.Certificate object to get the issuer of
:raises:
LookupError - when the issuer of the certificate could not be found
:return:
An asn1crypto.x509.Certificate object of the issuer
"""
for entry in self:
if entry.subject == cert.issuer:
if entry.key_identifier and cert.authority_key_identifier:
if entry.key_identifier == cert.authority_key_identifier:
return entry
else:
return entry
raise LookupError('Unable to find the issuer of the certificate specified') | python | def find_issuer(self, cert):
"""
Return the issuer of the cert specified, as defined by this path
:param cert:
An asn1crypto.x509.Certificate object to get the issuer of
:raises:
LookupError - when the issuer of the certificate could not be found
:return:
An asn1crypto.x509.Certificate object of the issuer
"""
for entry in self:
if entry.subject == cert.issuer:
if entry.key_identifier and cert.authority_key_identifier:
if entry.key_identifier == cert.authority_key_identifier:
return entry
else:
return entry
raise LookupError('Unable to find the issuer of the certificate specified') | [
"def",
"find_issuer",
"(",
"self",
",",
"cert",
")",
":",
"for",
"entry",
"in",
"self",
":",
"if",
"entry",
".",
"subject",
"==",
"cert",
".",
"issuer",
":",
"if",
"entry",
".",
"key_identifier",
"and",
"cert",
".",
"authority_key_identifier",
":",
"if",... | Return the issuer of the cert specified, as defined by this path
:param cert:
An asn1crypto.x509.Certificate object to get the issuer of
:raises:
LookupError - when the issuer of the certificate could not be found
:return:
An asn1crypto.x509.Certificate object of the issuer | [
"Return",
"the",
"issuer",
"of",
"the",
"cert",
"specified",
"as",
"defined",
"by",
"this",
"path"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/path.py#L47-L69 | train | 23,280 |
wbond/certvalidator | certvalidator/path.py | ValidationPath.truncate_to | def truncate_to(self, cert):
"""
Remove all certificates in the path after the cert specified
:param cert:
An asn1crypto.x509.Certificate object to find
:raises:
LookupError - when the certificate could not be found
:return:
The current ValidationPath object, for chaining
"""
cert_index = None
for index, entry in enumerate(self):
if entry.issuer_serial == cert.issuer_serial:
cert_index = index
break
if cert_index is None:
raise LookupError('Unable to find the certificate specified')
while len(self) > cert_index + 1:
self.pop()
return self | python | def truncate_to(self, cert):
"""
Remove all certificates in the path after the cert specified
:param cert:
An asn1crypto.x509.Certificate object to find
:raises:
LookupError - when the certificate could not be found
:return:
The current ValidationPath object, for chaining
"""
cert_index = None
for index, entry in enumerate(self):
if entry.issuer_serial == cert.issuer_serial:
cert_index = index
break
if cert_index is None:
raise LookupError('Unable to find the certificate specified')
while len(self) > cert_index + 1:
self.pop()
return self | [
"def",
"truncate_to",
"(",
"self",
",",
"cert",
")",
":",
"cert_index",
"=",
"None",
"for",
"index",
",",
"entry",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"entry",
".",
"issuer_serial",
"==",
"cert",
".",
"issuer_serial",
":",
"cert_index",
"=",
... | Remove all certificates in the path after the cert specified
:param cert:
An asn1crypto.x509.Certificate object to find
:raises:
LookupError - when the certificate could not be found
:return:
The current ValidationPath object, for chaining | [
"Remove",
"all",
"certificates",
"in",
"the",
"path",
"after",
"the",
"cert",
"specified"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/path.py#L71-L97 | train | 23,281 |
wbond/certvalidator | certvalidator/path.py | ValidationPath.truncate_to_issuer | def truncate_to_issuer(self, cert):
"""
Remove all certificates in the path after the issuer of the cert
specified, as defined by this path
:param cert:
An asn1crypto.x509.Certificate object to find the issuer of
:raises:
LookupError - when the issuer of the certificate could not be found
:return:
The current ValidationPath object, for chaining
"""
issuer_index = None
for index, entry in enumerate(self):
if entry.subject == cert.issuer:
if entry.key_identifier and cert.authority_key_identifier:
if entry.key_identifier == cert.authority_key_identifier:
issuer_index = index
break
else:
issuer_index = index
break
if issuer_index is None:
raise LookupError('Unable to find the issuer of the certificate specified')
while len(self) > issuer_index + 1:
self.pop()
return self | python | def truncate_to_issuer(self, cert):
"""
Remove all certificates in the path after the issuer of the cert
specified, as defined by this path
:param cert:
An asn1crypto.x509.Certificate object to find the issuer of
:raises:
LookupError - when the issuer of the certificate could not be found
:return:
The current ValidationPath object, for chaining
"""
issuer_index = None
for index, entry in enumerate(self):
if entry.subject == cert.issuer:
if entry.key_identifier and cert.authority_key_identifier:
if entry.key_identifier == cert.authority_key_identifier:
issuer_index = index
break
else:
issuer_index = index
break
if issuer_index is None:
raise LookupError('Unable to find the issuer of the certificate specified')
while len(self) > issuer_index + 1:
self.pop()
return self | [
"def",
"truncate_to_issuer",
"(",
"self",
",",
"cert",
")",
":",
"issuer_index",
"=",
"None",
"for",
"index",
",",
"entry",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"entry",
".",
"subject",
"==",
"cert",
".",
"issuer",
":",
"if",
"entry",
".",
... | Remove all certificates in the path after the issuer of the cert
specified, as defined by this path
:param cert:
An asn1crypto.x509.Certificate object to find the issuer of
:raises:
LookupError - when the issuer of the certificate could not be found
:return:
The current ValidationPath object, for chaining | [
"Remove",
"all",
"certificates",
"in",
"the",
"path",
"after",
"the",
"issuer",
"of",
"the",
"cert",
"specified",
"as",
"defined",
"by",
"this",
"path"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/path.py#L99-L131 | train | 23,282 |
wbond/certvalidator | certvalidator/path.py | ValidationPath.copy | def copy(self):
"""
Creates a copy of this path
:return:
A ValidationPath object
"""
copy = self.__class__()
copy._certs = self._certs[:]
copy._cert_hashes = self._cert_hashes.copy()
return copy | python | def copy(self):
"""
Creates a copy of this path
:return:
A ValidationPath object
"""
copy = self.__class__()
copy._certs = self._certs[:]
copy._cert_hashes = self._cert_hashes.copy()
return copy | [
"def",
"copy",
"(",
"self",
")",
":",
"copy",
"=",
"self",
".",
"__class__",
"(",
")",
"copy",
".",
"_certs",
"=",
"self",
".",
"_certs",
"[",
":",
"]",
"copy",
".",
"_cert_hashes",
"=",
"self",
".",
"_cert_hashes",
".",
"copy",
"(",
")",
"return",... | Creates a copy of this path
:return:
A ValidationPath object | [
"Creates",
"a",
"copy",
"of",
"this",
"path"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/path.py#L133-L144 | train | 23,283 |
wbond/certvalidator | certvalidator/path.py | ValidationPath.pop | def pop(self):
"""
Removes the last certificate from the path
:return:
The current ValidationPath object, for chaining
"""
last_cert = self._certs.pop()
self._cert_hashes.remove(last_cert.issuer_serial)
return self | python | def pop(self):
"""
Removes the last certificate from the path
:return:
The current ValidationPath object, for chaining
"""
last_cert = self._certs.pop()
self._cert_hashes.remove(last_cert.issuer_serial)
return self | [
"def",
"pop",
"(",
"self",
")",
":",
"last_cert",
"=",
"self",
".",
"_certs",
".",
"pop",
"(",
")",
"self",
".",
"_cert_hashes",
".",
"remove",
"(",
"last_cert",
".",
"issuer_serial",
")",
"return",
"self"
] | Removes the last certificate from the path
:return:
The current ValidationPath object, for chaining | [
"Removes",
"the",
"last",
"certificate",
"from",
"the",
"path"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/path.py#L146-L157 | train | 23,284 |
wbond/certvalidator | certvalidator/crl_client.py | fetch | def fetch(cert, use_deltas=True, user_agent=None, timeout=10):
"""
Fetches the CRLs for a certificate
:param cert:
An asn1cyrpto.x509.Certificate object to get the CRL for
:param use_deltas:
A boolean indicating if delta CRLs should be fetched
:param user_agent:
The HTTP user agent to use when requesting the CRL. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
A list asn1crypto.crl.CertificateList objects
"""
if not isinstance(cert, x509.Certificate):
raise TypeError('cert must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(cert))
if user_agent is None:
user_agent = 'certvalidator %s' % __version__
elif not isinstance(user_agent, str_cls):
raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))
output = []
sources = cert.crl_distribution_points
if use_deltas:
sources.extend(cert.delta_crl_distribution_points)
for distribution_point in sources:
url = distribution_point.url
output.append(_grab_crl(user_agent, url, timeout))
return output | python | def fetch(cert, use_deltas=True, user_agent=None, timeout=10):
"""
Fetches the CRLs for a certificate
:param cert:
An asn1cyrpto.x509.Certificate object to get the CRL for
:param use_deltas:
A boolean indicating if delta CRLs should be fetched
:param user_agent:
The HTTP user agent to use when requesting the CRL. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
A list asn1crypto.crl.CertificateList objects
"""
if not isinstance(cert, x509.Certificate):
raise TypeError('cert must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(cert))
if user_agent is None:
user_agent = 'certvalidator %s' % __version__
elif not isinstance(user_agent, str_cls):
raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))
output = []
sources = cert.crl_distribution_points
if use_deltas:
sources.extend(cert.delta_crl_distribution_points)
for distribution_point in sources:
url = distribution_point.url
output.append(_grab_crl(user_agent, url, timeout))
return output | [
"def",
"fetch",
"(",
"cert",
",",
"use_deltas",
"=",
"True",
",",
"user_agent",
"=",
"None",
",",
"timeout",
"=",
"10",
")",
":",
"if",
"not",
"isinstance",
"(",
"cert",
",",
"x509",
".",
"Certificate",
")",
":",
"raise",
"TypeError",
"(",
"'cert must ... | Fetches the CRLs for a certificate
:param cert:
An asn1cyrpto.x509.Certificate object to get the CRL for
:param use_deltas:
A boolean indicating if delta CRLs should be fetched
:param user_agent:
The HTTP user agent to use when requesting the CRL. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
A list asn1crypto.crl.CertificateList objects | [
"Fetches",
"the",
"CRLs",
"for",
"a",
"certificate"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/crl_client.py#L11-L54 | train | 23,285 |
wbond/certvalidator | certvalidator/crl_client.py | _grab_crl | def _grab_crl(user_agent, url, timeout):
"""
Fetches a CRL and parses it
:param user_agent:
A unicode string of the user agent to use when fetching the URL
:param url:
A unicode string of the URL to fetch the CRL from
:param timeout:
The number of seconds after which an HTTP request should timeout
:return:
An asn1crypto.crl.CertificateList object
"""
request = Request(url)
request.add_header('Accept', 'application/pkix-crl')
request.add_header('User-Agent', user_agent)
response = urlopen(request, None, timeout)
data = response.read()
if pem.detect(data):
_, _, data = pem.unarmor(data)
return crl.CertificateList.load(data) | python | def _grab_crl(user_agent, url, timeout):
"""
Fetches a CRL and parses it
:param user_agent:
A unicode string of the user agent to use when fetching the URL
:param url:
A unicode string of the URL to fetch the CRL from
:param timeout:
The number of seconds after which an HTTP request should timeout
:return:
An asn1crypto.crl.CertificateList object
"""
request = Request(url)
request.add_header('Accept', 'application/pkix-crl')
request.add_header('User-Agent', user_agent)
response = urlopen(request, None, timeout)
data = response.read()
if pem.detect(data):
_, _, data = pem.unarmor(data)
return crl.CertificateList.load(data) | [
"def",
"_grab_crl",
"(",
"user_agent",
",",
"url",
",",
"timeout",
")",
":",
"request",
"=",
"Request",
"(",
"url",
")",
"request",
".",
"add_header",
"(",
"'Accept'",
",",
"'application/pkix-crl'",
")",
"request",
".",
"add_header",
"(",
"'User-Agent'",
","... | Fetches a CRL and parses it
:param user_agent:
A unicode string of the user agent to use when fetching the URL
:param url:
A unicode string of the URL to fetch the CRL from
:param timeout:
The number of seconds after which an HTTP request should timeout
:return:
An asn1crypto.crl.CertificateList object | [
"Fetches",
"a",
"CRL",
"and",
"parses",
"it"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/crl_client.py#L57-L80 | train | 23,286 |
wbond/certvalidator | certvalidator/crl_client.py | fetch_certs | def fetch_certs(certificate_list, user_agent=None, timeout=10):
"""
Fetches certificates from the authority information access extension of
an asn1crypto.crl.CertificateList object and places them into the
cert registry.
:param certificate_list:
An asn1crypto.crl.CertificateList object
:param user_agent:
The HTTP user agent to use when requesting the CRL. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
A list of any asn1crypto.x509.Certificate objects that were fetched
"""
output = []
if user_agent is None:
user_agent = 'certvalidator %s' % __version__
elif not isinstance(user_agent, str_cls):
raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))
for url in certificate_list.issuer_cert_urls:
request = Request(url)
request.add_header('Accept', 'application/pkix-cert,application/pkcs7-mime')
request.add_header('User-Agent', user_agent)
response = urlopen(request, None, timeout)
content_type = response.headers['Content-Type'].strip()
response_data = response.read()
if content_type == 'application/pkix-cert':
output.append(x509.Certificate.load(response_data))
elif content_type == 'application/pkcs7-mime':
signed_data = cms.SignedData.load(response_data)
if isinstance(signed_data['certificates'], cms.CertificateSet):
for cert_choice in signed_data['certificates']:
if cert_choice.name == 'certificate':
output.append(cert_choice.chosen)
else:
raise ValueError('Unknown content type of %s when fetching issuer certificate for CRL' % repr(content_type))
return output | python | def fetch_certs(certificate_list, user_agent=None, timeout=10):
"""
Fetches certificates from the authority information access extension of
an asn1crypto.crl.CertificateList object and places them into the
cert registry.
:param certificate_list:
An asn1crypto.crl.CertificateList object
:param user_agent:
The HTTP user agent to use when requesting the CRL. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
A list of any asn1crypto.x509.Certificate objects that were fetched
"""
output = []
if user_agent is None:
user_agent = 'certvalidator %s' % __version__
elif not isinstance(user_agent, str_cls):
raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))
for url in certificate_list.issuer_cert_urls:
request = Request(url)
request.add_header('Accept', 'application/pkix-cert,application/pkcs7-mime')
request.add_header('User-Agent', user_agent)
response = urlopen(request, None, timeout)
content_type = response.headers['Content-Type'].strip()
response_data = response.read()
if content_type == 'application/pkix-cert':
output.append(x509.Certificate.load(response_data))
elif content_type == 'application/pkcs7-mime':
signed_data = cms.SignedData.load(response_data)
if isinstance(signed_data['certificates'], cms.CertificateSet):
for cert_choice in signed_data['certificates']:
if cert_choice.name == 'certificate':
output.append(cert_choice.chosen)
else:
raise ValueError('Unknown content type of %s when fetching issuer certificate for CRL' % repr(content_type))
return output | [
"def",
"fetch_certs",
"(",
"certificate_list",
",",
"user_agent",
"=",
"None",
",",
"timeout",
"=",
"10",
")",
":",
"output",
"=",
"[",
"]",
"if",
"user_agent",
"is",
"None",
":",
"user_agent",
"=",
"'certvalidator %s'",
"%",
"__version__",
"elif",
"not",
... | Fetches certificates from the authority information access extension of
an asn1crypto.crl.CertificateList object and places them into the
cert registry.
:param certificate_list:
An asn1crypto.crl.CertificateList object
:param user_agent:
The HTTP user agent to use when requesting the CRL. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
A list of any asn1crypto.x509.Certificate objects that were fetched | [
"Fetches",
"certificates",
"from",
"the",
"authority",
"information",
"access",
"extension",
"of",
"an",
"asn1crypto",
".",
"crl",
".",
"CertificateList",
"object",
"and",
"places",
"them",
"into",
"the",
"cert",
"registry",
"."
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/crl_client.py#L83-L135 | train | 23,287 |
wbond/certvalidator | certvalidator/__init__.py | CertificateValidator.validate_usage | def validate_usage(self, key_usage, extended_key_usage=None, extended_optional=False):
"""
Validates the certificate path and that the certificate is valid for
the key usage and extended key usage purposes specified.
:param key_usage:
A set of unicode strings of the required key usage purposes. Valid
values include:
- "digital_signature"
- "non_repudiation"
- "key_encipherment"
- "data_encipherment"
- "key_agreement"
- "key_cert_sign"
- "crl_sign"
- "encipher_only"
- "decipher_only"
:param extended_key_usage:
A set of unicode strings of the required extended key usage
purposes. These must be either dotted number OIDs, or one of the
following extended key usage purposes:
- "server_auth"
- "client_auth"
- "code_signing"
- "email_protection"
- "ipsec_end_system"
- "ipsec_tunnel"
- "ipsec_user"
- "time_stamping"
- "ocsp_signing"
- "wireless_access_points"
An example of a dotted number OID:
- "1.3.6.1.5.5.7.3.1"
:param extended_optional:
A bool - if the extended_key_usage extension may be ommited and still
considered valid
:raises:
certvalidator.errors.PathValidationError - when an error occurs validating the path
certvalidator.errors.RevokedError - when the certificate or another certificate in its path has been revoked
certvalidator.errors.InvalidCertificateError - when the certificate is not valid for the usages specified
:return:
A certvalidator.path.ValidationPath object of the validated
certificate validation path
"""
self._validate_path()
validate_usage(
self._context,
self._certificate,
key_usage,
extended_key_usage,
extended_optional
)
return self._path | python | def validate_usage(self, key_usage, extended_key_usage=None, extended_optional=False):
"""
Validates the certificate path and that the certificate is valid for
the key usage and extended key usage purposes specified.
:param key_usage:
A set of unicode strings of the required key usage purposes. Valid
values include:
- "digital_signature"
- "non_repudiation"
- "key_encipherment"
- "data_encipherment"
- "key_agreement"
- "key_cert_sign"
- "crl_sign"
- "encipher_only"
- "decipher_only"
:param extended_key_usage:
A set of unicode strings of the required extended key usage
purposes. These must be either dotted number OIDs, or one of the
following extended key usage purposes:
- "server_auth"
- "client_auth"
- "code_signing"
- "email_protection"
- "ipsec_end_system"
- "ipsec_tunnel"
- "ipsec_user"
- "time_stamping"
- "ocsp_signing"
- "wireless_access_points"
An example of a dotted number OID:
- "1.3.6.1.5.5.7.3.1"
:param extended_optional:
A bool - if the extended_key_usage extension may be ommited and still
considered valid
:raises:
certvalidator.errors.PathValidationError - when an error occurs validating the path
certvalidator.errors.RevokedError - when the certificate or another certificate in its path has been revoked
certvalidator.errors.InvalidCertificateError - when the certificate is not valid for the usages specified
:return:
A certvalidator.path.ValidationPath object of the validated
certificate validation path
"""
self._validate_path()
validate_usage(
self._context,
self._certificate,
key_usage,
extended_key_usage,
extended_optional
)
return self._path | [
"def",
"validate_usage",
"(",
"self",
",",
"key_usage",
",",
"extended_key_usage",
"=",
"None",
",",
"extended_optional",
"=",
"False",
")",
":",
"self",
".",
"_validate_path",
"(",
")",
"validate_usage",
"(",
"self",
".",
"_context",
",",
"self",
".",
"_cer... | Validates the certificate path and that the certificate is valid for
the key usage and extended key usage purposes specified.
:param key_usage:
A set of unicode strings of the required key usage purposes. Valid
values include:
- "digital_signature"
- "non_repudiation"
- "key_encipherment"
- "data_encipherment"
- "key_agreement"
- "key_cert_sign"
- "crl_sign"
- "encipher_only"
- "decipher_only"
:param extended_key_usage:
A set of unicode strings of the required extended key usage
purposes. These must be either dotted number OIDs, or one of the
following extended key usage purposes:
- "server_auth"
- "client_auth"
- "code_signing"
- "email_protection"
- "ipsec_end_system"
- "ipsec_tunnel"
- "ipsec_user"
- "time_stamping"
- "ocsp_signing"
- "wireless_access_points"
An example of a dotted number OID:
- "1.3.6.1.5.5.7.3.1"
:param extended_optional:
A bool - if the extended_key_usage extension may be ommited and still
considered valid
:raises:
certvalidator.errors.PathValidationError - when an error occurs validating the path
certvalidator.errors.RevokedError - when the certificate or another certificate in its path has been revoked
certvalidator.errors.InvalidCertificateError - when the certificate is not valid for the usages specified
:return:
A certvalidator.path.ValidationPath object of the validated
certificate validation path | [
"Validates",
"the",
"certificate",
"path",
"and",
"that",
"the",
"certificate",
"is",
"valid",
"for",
"the",
"key",
"usage",
"and",
"extended",
"key",
"usage",
"purposes",
"specified",
"."
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/__init__.py#L140-L201 | train | 23,288 |
wbond/certvalidator | certvalidator/__init__.py | CertificateValidator.validate_tls | def validate_tls(self, hostname):
"""
Validates the certificate path, that the certificate is valid for
the hostname provided and that the certificate is valid for the purpose
of a TLS connection.
:param hostname:
A unicode string of the TLS server hostname
:raises:
certvalidator.errors.PathValidationError - when an error occurs validating the path
certvalidator.errors.RevokedError - when the certificate or another certificate in its path has been revoked
certvalidator.errors.InvalidCertificateError - when the certificate is not valid for TLS or the hostname
:return:
A certvalidator.path.ValidationPath object of the validated
certificate validation path
"""
self._validate_path()
validate_tls_hostname(self._context, self._certificate, hostname)
return self._path | python | def validate_tls(self, hostname):
"""
Validates the certificate path, that the certificate is valid for
the hostname provided and that the certificate is valid for the purpose
of a TLS connection.
:param hostname:
A unicode string of the TLS server hostname
:raises:
certvalidator.errors.PathValidationError - when an error occurs validating the path
certvalidator.errors.RevokedError - when the certificate or another certificate in its path has been revoked
certvalidator.errors.InvalidCertificateError - when the certificate is not valid for TLS or the hostname
:return:
A certvalidator.path.ValidationPath object of the validated
certificate validation path
"""
self._validate_path()
validate_tls_hostname(self._context, self._certificate, hostname)
return self._path | [
"def",
"validate_tls",
"(",
"self",
",",
"hostname",
")",
":",
"self",
".",
"_validate_path",
"(",
")",
"validate_tls_hostname",
"(",
"self",
".",
"_context",
",",
"self",
".",
"_certificate",
",",
"hostname",
")",
"return",
"self",
".",
"_path"
] | Validates the certificate path, that the certificate is valid for
the hostname provided and that the certificate is valid for the purpose
of a TLS connection.
:param hostname:
A unicode string of the TLS server hostname
:raises:
certvalidator.errors.PathValidationError - when an error occurs validating the path
certvalidator.errors.RevokedError - when the certificate or another certificate in its path has been revoked
certvalidator.errors.InvalidCertificateError - when the certificate is not valid for TLS or the hostname
:return:
A certvalidator.path.ValidationPath object of the validated
certificate validation path | [
"Validates",
"the",
"certificate",
"path",
"that",
"the",
"certificate",
"is",
"valid",
"for",
"the",
"hostname",
"provided",
"and",
"that",
"the",
"certificate",
"is",
"valid",
"for",
"the",
"purpose",
"of",
"a",
"TLS",
"connection",
"."
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/__init__.py#L203-L224 | train | 23,289 |
wbond/certvalidator | certvalidator/context.py | ValidationContext.crls | def crls(self):
"""
A list of all cached asn1crypto.crl.CertificateList objects
"""
if not self._allow_fetching:
return self._crls
output = []
for issuer_serial in self._fetched_crls:
output.extend(self._fetched_crls[issuer_serial])
return output | python | def crls(self):
"""
A list of all cached asn1crypto.crl.CertificateList objects
"""
if not self._allow_fetching:
return self._crls
output = []
for issuer_serial in self._fetched_crls:
output.extend(self._fetched_crls[issuer_serial])
return output | [
"def",
"crls",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_allow_fetching",
":",
"return",
"self",
".",
"_crls",
"output",
"=",
"[",
"]",
"for",
"issuer_serial",
"in",
"self",
".",
"_fetched_crls",
":",
"output",
".",
"extend",
"(",
"self",
".",... | A list of all cached asn1crypto.crl.CertificateList objects | [
"A",
"list",
"of",
"all",
"cached",
"asn1crypto",
".",
"crl",
".",
"CertificateList",
"objects"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/context.py#L367-L378 | train | 23,290 |
wbond/certvalidator | certvalidator/context.py | ValidationContext.ocsps | def ocsps(self):
"""
A list of all cached asn1crypto.ocsp.OCSPResponse objects
"""
if not self._allow_fetching:
return self._ocsps
output = []
for issuer_serial in self._fetched_ocsps:
output.extend(self._fetched_ocsps[issuer_serial])
return output | python | def ocsps(self):
"""
A list of all cached asn1crypto.ocsp.OCSPResponse objects
"""
if not self._allow_fetching:
return self._ocsps
output = []
for issuer_serial in self._fetched_ocsps:
output.extend(self._fetched_ocsps[issuer_serial])
return output | [
"def",
"ocsps",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_allow_fetching",
":",
"return",
"self",
".",
"_ocsps",
"output",
"=",
"[",
"]",
"for",
"issuer_serial",
"in",
"self",
".",
"_fetched_ocsps",
":",
"output",
".",
"extend",
"(",
"self",
"... | A list of all cached asn1crypto.ocsp.OCSPResponse objects | [
"A",
"list",
"of",
"all",
"cached",
"asn1crypto",
".",
"ocsp",
".",
"OCSPResponse",
"objects"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/context.py#L381-L392 | train | 23,291 |
wbond/certvalidator | certvalidator/context.py | ValidationContext._extract_ocsp_certs | def _extract_ocsp_certs(self, ocsp_response):
"""
Extracts any certificates included with an OCSP response and adds them
to the certificate registry
:param ocsp_response:
An asn1crypto.ocsp.OCSPResponse object to look for certs inside of
"""
status = ocsp_response['response_status'].native
if status == 'successful':
response_bytes = ocsp_response['response_bytes']
if response_bytes['response_type'].native == 'basic_ocsp_response':
response = response_bytes['response'].parsed
if response['certs']:
for other_cert in response['certs']:
if self.certificate_registry.add_other_cert(other_cert):
self._revocation_certs[other_cert.issuer_serial] = other_cert | python | def _extract_ocsp_certs(self, ocsp_response):
"""
Extracts any certificates included with an OCSP response and adds them
to the certificate registry
:param ocsp_response:
An asn1crypto.ocsp.OCSPResponse object to look for certs inside of
"""
status = ocsp_response['response_status'].native
if status == 'successful':
response_bytes = ocsp_response['response_bytes']
if response_bytes['response_type'].native == 'basic_ocsp_response':
response = response_bytes['response'].parsed
if response['certs']:
for other_cert in response['certs']:
if self.certificate_registry.add_other_cert(other_cert):
self._revocation_certs[other_cert.issuer_serial] = other_cert | [
"def",
"_extract_ocsp_certs",
"(",
"self",
",",
"ocsp_response",
")",
":",
"status",
"=",
"ocsp_response",
"[",
"'response_status'",
"]",
".",
"native",
"if",
"status",
"==",
"'successful'",
":",
"response_bytes",
"=",
"ocsp_response",
"[",
"'response_bytes'",
"]"... | Extracts any certificates included with an OCSP response and adds them
to the certificate registry
:param ocsp_response:
An asn1crypto.ocsp.OCSPResponse object to look for certs inside of | [
"Extracts",
"any",
"certificates",
"included",
"with",
"an",
"OCSP",
"response",
"and",
"adds",
"them",
"to",
"the",
"certificate",
"registry"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/context.py#L516-L533 | train | 23,292 |
wbond/certvalidator | certvalidator/context.py | ValidationContext.check_validation | def check_validation(self, cert):
"""
Checks to see if a certificate has been validated, and if so, returns
the ValidationPath used to validate it.
:param cert:
An asn1crypto.x509.Certificate object
:return:
None if not validated, or a certvalidator.path.ValidationPath
object of the validation path
"""
# CA certs are automatically trusted since they are from the trust list
if self.certificate_registry.is_ca(cert) and cert.signature not in self._validate_map:
self._validate_map[cert.signature] = ValidationPath(cert)
return self._validate_map.get(cert.signature) | python | def check_validation(self, cert):
"""
Checks to see if a certificate has been validated, and if so, returns
the ValidationPath used to validate it.
:param cert:
An asn1crypto.x509.Certificate object
:return:
None if not validated, or a certvalidator.path.ValidationPath
object of the validation path
"""
# CA certs are automatically trusted since they are from the trust list
if self.certificate_registry.is_ca(cert) and cert.signature not in self._validate_map:
self._validate_map[cert.signature] = ValidationPath(cert)
return self._validate_map.get(cert.signature) | [
"def",
"check_validation",
"(",
"self",
",",
"cert",
")",
":",
"# CA certs are automatically trusted since they are from the trust list",
"if",
"self",
".",
"certificate_registry",
".",
"is_ca",
"(",
"cert",
")",
"and",
"cert",
".",
"signature",
"not",
"in",
"self",
... | Checks to see if a certificate has been validated, and if so, returns
the ValidationPath used to validate it.
:param cert:
An asn1crypto.x509.Certificate object
:return:
None if not validated, or a certvalidator.path.ValidationPath
object of the validation path | [
"Checks",
"to",
"see",
"if",
"a",
"certificate",
"has",
"been",
"validated",
"and",
"if",
"so",
"returns",
"the",
"ValidationPath",
"used",
"to",
"validate",
"it",
"."
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/context.py#L550-L567 | train | 23,293 |
wbond/certvalidator | certvalidator/context.py | ValidationContext.clear_validation | def clear_validation(self, cert):
"""
Clears the record that a certificate has been validated
:param cert:
An ans1crypto.x509.Certificate object
"""
if cert.signature in self._validate_map:
del self._validate_map[cert.signature] | python | def clear_validation(self, cert):
"""
Clears the record that a certificate has been validated
:param cert:
An ans1crypto.x509.Certificate object
"""
if cert.signature in self._validate_map:
del self._validate_map[cert.signature] | [
"def",
"clear_validation",
"(",
"self",
",",
"cert",
")",
":",
"if",
"cert",
".",
"signature",
"in",
"self",
".",
"_validate_map",
":",
"del",
"self",
".",
"_validate_map",
"[",
"cert",
".",
"signature",
"]"
] | Clears the record that a certificate has been validated
:param cert:
An ans1crypto.x509.Certificate object | [
"Clears",
"the",
"record",
"that",
"a",
"certificate",
"has",
"been",
"validated"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/context.py#L569-L578 | train | 23,294 |
wbond/certvalidator | certvalidator/validate.py | _find_cert_in_list | def _find_cert_in_list(cert, issuer, certificate_list, crl_issuer):
"""
Looks for a cert in the list of revoked certificates
:param cert:
An asn1crypto.x509.Certificate object of the cert being checked
:param issuer:
An asn1crypto.x509.Certificate object of the cert issuer
:param certificate_list:
An ans1crypto.crl.CertificateList object to look in for the cert
:param crl_issuer:
An asn1crypto.x509.Certificate object of the CRL issuer
:return:
A tuple of (None, None) if not present, otherwise a tuple of
(asn1crypto.x509.Time object, asn1crypto.crl.CRLReason object)
representing the date/time the object was revoked and why
"""
revoked_certificates = certificate_list['tbs_cert_list']['revoked_certificates']
cert_serial = cert.serial_number
issuer_name = issuer.subject
known_extensions = set([
'crl_reason',
'hold_instruction_code',
'invalidity_date',
'certificate_issuer'
])
last_issuer_name = crl_issuer.subject
for revoked_cert in revoked_certificates:
# If any unknown critical extensions, the entry can not be used
if revoked_cert.critical_extensions - known_extensions:
raise NotImplementedError()
if revoked_cert.issuer_name and revoked_cert.issuer_name != last_issuer_name:
last_issuer_name = revoked_cert.issuer_name
if last_issuer_name != issuer_name:
continue
if revoked_cert['user_certificate'].native != cert_serial:
continue
if not revoked_cert.crl_reason_value:
crl_reason = crl.CRLReason('unspecified')
else:
crl_reason = revoked_cert.crl_reason_value
return (revoked_cert['revocation_date'], crl_reason)
return (None, None) | python | def _find_cert_in_list(cert, issuer, certificate_list, crl_issuer):
"""
Looks for a cert in the list of revoked certificates
:param cert:
An asn1crypto.x509.Certificate object of the cert being checked
:param issuer:
An asn1crypto.x509.Certificate object of the cert issuer
:param certificate_list:
An ans1crypto.crl.CertificateList object to look in for the cert
:param crl_issuer:
An asn1crypto.x509.Certificate object of the CRL issuer
:return:
A tuple of (None, None) if not present, otherwise a tuple of
(asn1crypto.x509.Time object, asn1crypto.crl.CRLReason object)
representing the date/time the object was revoked and why
"""
revoked_certificates = certificate_list['tbs_cert_list']['revoked_certificates']
cert_serial = cert.serial_number
issuer_name = issuer.subject
known_extensions = set([
'crl_reason',
'hold_instruction_code',
'invalidity_date',
'certificate_issuer'
])
last_issuer_name = crl_issuer.subject
for revoked_cert in revoked_certificates:
# If any unknown critical extensions, the entry can not be used
if revoked_cert.critical_extensions - known_extensions:
raise NotImplementedError()
if revoked_cert.issuer_name and revoked_cert.issuer_name != last_issuer_name:
last_issuer_name = revoked_cert.issuer_name
if last_issuer_name != issuer_name:
continue
if revoked_cert['user_certificate'].native != cert_serial:
continue
if not revoked_cert.crl_reason_value:
crl_reason = crl.CRLReason('unspecified')
else:
crl_reason = revoked_cert.crl_reason_value
return (revoked_cert['revocation_date'], crl_reason)
return (None, None) | [
"def",
"_find_cert_in_list",
"(",
"cert",
",",
"issuer",
",",
"certificate_list",
",",
"crl_issuer",
")",
":",
"revoked_certificates",
"=",
"certificate_list",
"[",
"'tbs_cert_list'",
"]",
"[",
"'revoked_certificates'",
"]",
"cert_serial",
"=",
"cert",
".",
"serial_... | Looks for a cert in the list of revoked certificates
:param cert:
An asn1crypto.x509.Certificate object of the cert being checked
:param issuer:
An asn1crypto.x509.Certificate object of the cert issuer
:param certificate_list:
An ans1crypto.crl.CertificateList object to look in for the cert
:param crl_issuer:
An asn1crypto.x509.Certificate object of the CRL issuer
:return:
A tuple of (None, None) if not present, otherwise a tuple of
(asn1crypto.x509.Time object, asn1crypto.crl.CRLReason object)
representing the date/time the object was revoked and why | [
"Looks",
"for",
"a",
"cert",
"in",
"the",
"list",
"of",
"revoked",
"certificates"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/validate.py#L1784-L1839 | train | 23,295 |
wbond/certvalidator | certvalidator/validate.py | PolicyTreeRoot.add_child | def add_child(self, valid_policy, qualifier_set, expected_policy_set):
"""
Creates a new PolicyTreeNode as a child of this node
:param valid_policy:
A unicode string of a policy name or OID
:param qualifier_set:
An instance of asn1crypto.x509.PolicyQualifierInfos
:param expected_policy_set:
A set of unicode strings containing policy names or OIDs
"""
child = PolicyTreeNode(valid_policy, qualifier_set, expected_policy_set)
child.parent = self
self.children.append(child) | python | def add_child(self, valid_policy, qualifier_set, expected_policy_set):
"""
Creates a new PolicyTreeNode as a child of this node
:param valid_policy:
A unicode string of a policy name or OID
:param qualifier_set:
An instance of asn1crypto.x509.PolicyQualifierInfos
:param expected_policy_set:
A set of unicode strings containing policy names or OIDs
"""
child = PolicyTreeNode(valid_policy, qualifier_set, expected_policy_set)
child.parent = self
self.children.append(child) | [
"def",
"add_child",
"(",
"self",
",",
"valid_policy",
",",
"qualifier_set",
",",
"expected_policy_set",
")",
":",
"child",
"=",
"PolicyTreeNode",
"(",
"valid_policy",
",",
"qualifier_set",
",",
"expected_policy_set",
")",
"child",
".",
"parent",
"=",
"self",
"se... | Creates a new PolicyTreeNode as a child of this node
:param valid_policy:
A unicode string of a policy name or OID
:param qualifier_set:
An instance of asn1crypto.x509.PolicyQualifierInfos
:param expected_policy_set:
A set of unicode strings containing policy names or OIDs | [
"Creates",
"a",
"new",
"PolicyTreeNode",
"as",
"a",
"child",
"of",
"this",
"node"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/validate.py#L1871-L1887 | train | 23,296 |
wbond/certvalidator | certvalidator/validate.py | PolicyTreeRoot.at_depth | def at_depth(self, depth):
"""
Returns a generator yielding all nodes in the tree at a specific depth
:param depth:
An integer >= 0 of the depth of nodes to yield
:return:
A generator yielding PolicyTreeNode objects
"""
for child in list(self.children):
if depth == 0:
yield child
else:
for grandchild in child.at_depth(depth - 1):
yield grandchild | python | def at_depth(self, depth):
"""
Returns a generator yielding all nodes in the tree at a specific depth
:param depth:
An integer >= 0 of the depth of nodes to yield
:return:
A generator yielding PolicyTreeNode objects
"""
for child in list(self.children):
if depth == 0:
yield child
else:
for grandchild in child.at_depth(depth - 1):
yield grandchild | [
"def",
"at_depth",
"(",
"self",
",",
"depth",
")",
":",
"for",
"child",
"in",
"list",
"(",
"self",
".",
"children",
")",
":",
"if",
"depth",
"==",
"0",
":",
"yield",
"child",
"else",
":",
"for",
"grandchild",
"in",
"child",
".",
"at_depth",
"(",
"d... | Returns a generator yielding all nodes in the tree at a specific depth
:param depth:
An integer >= 0 of the depth of nodes to yield
:return:
A generator yielding PolicyTreeNode objects | [
"Returns",
"a",
"generator",
"yielding",
"all",
"nodes",
"in",
"the",
"tree",
"at",
"a",
"specific",
"depth"
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/validate.py#L1899-L1915 | train | 23,297 |
wbond/certvalidator | certvalidator/validate.py | PolicyTreeRoot.walk_up | def walk_up(self, depth):
"""
Returns a generator yielding all nodes in the tree at a specific depth,
or above. Yields nodes starting with leaves and traversing up to the
root.
:param depth:
An integer >= 0 of the depth of nodes to walk up from
:return:
A generator yielding PolicyTreeNode objects
"""
for child in list(self.children):
if depth != 0:
for grandchild in child.walk_up(depth - 1):
yield grandchild
yield child | python | def walk_up(self, depth):
"""
Returns a generator yielding all nodes in the tree at a specific depth,
or above. Yields nodes starting with leaves and traversing up to the
root.
:param depth:
An integer >= 0 of the depth of nodes to walk up from
:return:
A generator yielding PolicyTreeNode objects
"""
for child in list(self.children):
if depth != 0:
for grandchild in child.walk_up(depth - 1):
yield grandchild
yield child | [
"def",
"walk_up",
"(",
"self",
",",
"depth",
")",
":",
"for",
"child",
"in",
"list",
"(",
"self",
".",
"children",
")",
":",
"if",
"depth",
"!=",
"0",
":",
"for",
"grandchild",
"in",
"child",
".",
"walk_up",
"(",
"depth",
"-",
"1",
")",
":",
"yie... | Returns a generator yielding all nodes in the tree at a specific depth,
or above. Yields nodes starting with leaves and traversing up to the
root.
:param depth:
An integer >= 0 of the depth of nodes to walk up from
:return:
A generator yielding PolicyTreeNode objects | [
"Returns",
"a",
"generator",
"yielding",
"all",
"nodes",
"in",
"the",
"tree",
"at",
"a",
"specific",
"depth",
"or",
"above",
".",
"Yields",
"nodes",
"starting",
"with",
"leaves",
"and",
"traversing",
"up",
"to",
"the",
"root",
"."
] | c62233a713bcc36963e9d82323ec8d84f8e01485 | https://github.com/wbond/certvalidator/blob/c62233a713bcc36963e9d82323ec8d84f8e01485/certvalidator/validate.py#L1917-L1934 | train | 23,298 |
aio-libs/aiomcache | aiomcache/pool.py | MemcachePool.clear | def clear(self):
"""Clear pool connections."""
while not self._pool.empty():
conn = yield from self._pool.get()
self._do_close(conn) | python | def clear(self):
"""Clear pool connections."""
while not self._pool.empty():
conn = yield from self._pool.get()
self._do_close(conn) | [
"def",
"clear",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"_pool",
".",
"empty",
"(",
")",
":",
"conn",
"=",
"yield",
"from",
"self",
".",
"_pool",
".",
"get",
"(",
")",
"self",
".",
"_do_close",
"(",
"conn",
")"
] | Clear pool connections. | [
"Clear",
"pool",
"connections",
"."
] | 75d44b201aea91bc2856b10940922d5ebfbfcd7b | https://github.com/aio-libs/aiomcache/blob/75d44b201aea91bc2856b10940922d5ebfbfcd7b/aiomcache/pool.py#L23-L27 | train | 23,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.