repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/reader.py
|
read_variables
|
python
|
def read_variables(config_location: str) -> Dict[str, str]:
with open(config_location, "r") as config_file:
config_lines = config_file.readlines()
try:
return json.loads("".join(config_lines), parse_int=lambda num_str: str(num_str),
parse_float=lambda float_str: str(float_str))
except JSONDecodeError:
pass
config_lines = _shell_to_ini(config_lines)
return _read_ini_config("\n".join(config_lines))
|
Reads variables out of a config file. Variables can be in an ini file, a shell file used to source the variables
(i.e. one that has just got "export *" like statements in it) or in JSON.
:param config_location: the location of the config file
:return: dictionary where the variable names are key and their values are the values
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/reader.py#L12-L27
|
[
"def _shell_to_ini(shell_file_contents: List[str]) -> List[str]:\n \"\"\"\n Converts a shell file, which just contains comments and \"export *\" statements into an ini file.\n :param shell_file_contents: the contents of the shell file\n :return: lines of an equivalent ini file\n \"\"\"\n line_number = 0\n while line_number < len(shell_file_contents):\n line = shell_file_contents[line_number].strip()\n if \"=\" not in line:\n del shell_file_contents[line_number]\n else:\n if line.strip().startswith(_EXPORT_COMMAND):\n shell_file_contents[line_number] = line.replace(_EXPORT_COMMAND, \"\").strip()\n line_number += 1\n return shell_file_contents\n",
"def _read_ini_config(ini_file_contents: str) -> Dict[str, str]:\n \"\"\"\n Parses the given ini file contents and converts to a dictionary of key/value pairs.\n :param ini_file_contents: the contents of the ini file\n :return: dictionary where the variable names are key and their values are the values\n \"\"\"\n config = configparser.ConfigParser()\n config.optionxform = str\n config.read_string(_FAKE_SECTION + ini_file_contents)\n\n items = {}\n for section in config.sections():\n items.update(dict(config[section].items()))\n\n return items\n"
] |
import configparser
import json
from json import JSONDecodeError
from typing import Dict, List
_FAKE_SECTION_NAME = "all"
_FAKE_SECTION = "[%s]\n" % _FAKE_SECTION_NAME
_EXPORT_COMMAND = "export "
def _read_ini_config(ini_file_contents: str) -> Dict[str, str]:
    """
    Parses the given ini file contents and converts to a dictionary of key/value pairs.
    :param ini_file_contents: the contents of the ini file
    :return: dictionary where the variable names are key and their values are the values
    """
    parser = configparser.ConfigParser()
    # Keep option names case-sensitive (ConfigParser lower-cases them by default).
    parser.optionxform = str
    # A dummy section header is prepended because ConfigParser requires one.
    parser.read_string(_FAKE_SECTION + ini_file_contents)
    variables = {}  # type: Dict[str, str]
    for section_name in parser.sections():
        for option_name, option_value in parser[section_name].items():
            variables[option_name] = option_value
    return variables
def _shell_to_ini(shell_file_contents: List[str]) -> List[str]:
    """
    Converts a shell file, which just contains comments and "export *" statements, into ini file lines.

    Only a leading "export " prefix is stripped (a naive `str.replace` would also
    corrupt values that happen to contain the word "export "). The input list is
    not modified; a new list is returned.
    :param shell_file_contents: the contents of the shell file
    :return: lines of an equivalent ini file
    """
    ini_lines = []
    for raw_line in shell_file_contents:
        line = raw_line.strip()
        if "=" not in line:
            # Not an assignment (blank line, comment, etc.) - drop it.
            continue
        if line.startswith(_EXPORT_COMMAND):
            # Strip only the leading "export " prefix, not every occurrence.
            line = line[len(_EXPORT_COMMAND):].strip()
        ini_lines.append(line)
    return ini_lines
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/reader.py
|
_read_ini_config
|
python
|
def _read_ini_config(ini_file_contents: str) -> Dict[str, str]:
"""
Parses the given ini file contents and converts them to a dictionary of key/value pairs.
:param ini_file_contents: the contents of the ini file
:return: dictionary where the variable names are keys and their values are the values
"""
config = configparser.ConfigParser()
# Keep option names case-sensitive (ConfigParser lower-cases them by default).
config.optionxform = str
# _FAKE_SECTION is prepended because ConfigParser requires a section header.
config.read_string(_FAKE_SECTION + ini_file_contents)
items = {}
for section in config.sections():
items.update(dict(config[section].items()))
return items
|
Parses the given ini file contents and converts to a dictionary of key/value pairs.
:param ini_file_contents: the contents of the ini file
:return: dictionary where the variable names are key and their values are the values
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/reader.py#L30-L44
| null |
import configparser
import json
from json import JSONDecodeError
from typing import Dict, List
_FAKE_SECTION_NAME = "all"
_FAKE_SECTION = "[%s]\n" % _FAKE_SECTION_NAME
_EXPORT_COMMAND = "export "
def read_variables(config_location: str) -> Dict[str, str]:
"""
Reads variables out of a config file. Variables can be in an ini file, a shell file used to source the variables
(i.e. one that has just got "export *" like statements in it) or in JSON.
:param config_location: the location of the config file
:return: dictionary where the variable names are key and their values are the values
"""
with open(config_location, "r") as config_file:
config_lines = config_file.readlines()
try:
# Try JSON first; the parse hooks keep numeric JSON values as strings (e.g. 1 -> "1").
return json.loads("".join(config_lines), parse_int=lambda num_str: str(num_str),
parse_float=lambda float_str: str(float_str))
except JSONDecodeError:
# Not valid JSON - fall through and treat the content as shell/ini format.
pass
config_lines = _shell_to_ini(config_lines)
return _read_ini_config("\n".join(config_lines))
def _shell_to_ini(shell_file_contents: List[str]) -> List[str]:
"""
Converts a shell file, which just contains comments and "export *" statements, into an ini file.

NOTE: mutates `shell_file_contents` in place and returns the same list object.
:param shell_file_contents: the contents of the shell file
:return: lines of an equivalent ini file
"""
line_number = 0
while line_number < len(shell_file_contents):
line = shell_file_contents[line_number].strip()
if "=" not in line:
# Not an assignment (comment/blank line) - delete it and revisit this index.
del shell_file_contents[line_number]
else:
if line.strip().startswith(_EXPORT_COMMAND):
# NOTE(review): `replace` removes every occurrence of "export ", not only
# the leading prefix - a value containing "export " would be corrupted.
shell_file_contents[line_number] = line.replace(_EXPORT_COMMAND, "").strip()
line_number += 1
return shell_file_contents
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/reader.py
|
_shell_to_ini
|
python
|
def _shell_to_ini(shell_file_contents: List[str]) -> List[str]:
"""
Converts shell file lines (comments plus "export NAME=value" statements) into ini file lines.

NOTE: mutates `shell_file_contents` in place and returns the same list object.
:param shell_file_contents: the contents of the shell file
:return: lines of an equivalent ini file
"""
line_number = 0
while line_number < len(shell_file_contents):
line = shell_file_contents[line_number].strip()
if "=" not in line:
# Not an assignment (comment/blank line) - delete it and revisit this index.
del shell_file_contents[line_number]
else:
if line.strip().startswith(_EXPORT_COMMAND):
# NOTE(review): `replace` removes every occurrence of "export ", not only
# the leading prefix - a value containing "export " would be corrupted.
shell_file_contents[line_number] = line.replace(_EXPORT_COMMAND, "").strip()
line_number += 1
return shell_file_contents
|
Converts a shell file, which just contains comments and "export *" statements, into an ini file.
:param shell_file_contents: the contents of the shell file
:return: lines of an equivalent ini file
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/reader.py#L47-L62
| null |
import configparser
import json
from json import JSONDecodeError
from typing import Dict, List
_FAKE_SECTION_NAME = "all"
_FAKE_SECTION = "[%s]\n" % _FAKE_SECTION_NAME
_EXPORT_COMMAND = "export "
def read_variables(config_location: str) -> Dict[str, str]:
"""
Reads variables out of a config file. Variables can be in an ini file, a shell file used to source the variables
(i.e. one that has just got "export *" like statements in it) or in JSON.
:param config_location: the location of the config file
:return: dictionary where the variable names are key and their values are the values
"""
with open(config_location, "r") as config_file:
config_lines = config_file.readlines()
try:
# Try JSON first; the parse hooks keep numeric JSON values as strings (e.g. 1 -> "1").
return json.loads("".join(config_lines), parse_int=lambda num_str: str(num_str),
parse_float=lambda float_str: str(float_str))
except JSONDecodeError:
# Not valid JSON - fall through and treat the content as shell/ini format.
pass
config_lines = _shell_to_ini(config_lines)
return _read_ini_config("\n".join(config_lines))
def _read_ini_config(ini_file_contents: str) -> Dict[str, str]:
"""
Parses the given ini file contents and converts to a dictionary of key/value pairs.
:param ini_file_contents: the contents of the ini file
:return: dictionary where the variable names are key and their values are the values
"""
config = configparser.ConfigParser()
# Keep option names case-sensitive (ConfigParser lower-cases them by default).
config.optionxform = str
# _FAKE_SECTION is prepended because ConfigParser requires a section header.
config.read_string(_FAKE_SECTION + ini_file_contents)
items = {}
for section in config.sections():
items.update(dict(config[section].items()))
return items
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/manager.py
|
ProjectVariablesManager.get
|
python
|
def get(self) -> Dict[str, str]:
    """
    Gets the build variables for the project.
    :return: the build variables
    """
    # `all=True` fetches every page of results, not just the first.
    build_variables = {}  # type: Dict[str, str]
    for build_variable in self._project.variables.list(all=True):
        build_variables[build_variable.key] = build_variable.value
    return build_variables
|
Gets the build variables for the project.
:return: the build variables
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/manager.py#L43-L49
| null |
class ProjectVariablesManager:
"""
Manages the build variables used by a project.
"""
def __init__(self, gitlab_config: GitLabConfig, project: str):
"""
Constructor.
:param gitlab_config: configuration to access GitLab
:param project: the project of interest (preferably namespaced, e.g. "hgi/my-project")
"""
self._connector = Gitlab(gitlab_config.location, gitlab_config.token, ssl_verify=SSL_VERIFY)
self._connector.auth()
try:
self._project = self._connector.projects.get(project)
except GitlabGetError as e:
if "Project Not Found" in e.error_message:
raise ValueError("Project '%s' not found. Valid projects are: %s"
% (project,
[project.path_with_namespace for project in self._connector.projects.list()]))
def clear(self):
"""
Clears all of the build variables.
"""
for variable in self._project.variables.list(all=True):
variable.delete()
def remove(self, variables: Union[Iterable[str], Dict[str, str]]=None):
"""
Removes the given variables. Will only remove a key if it has the given value if the value has been defined.
:param variables: the variables to remove
"""
keys = list(variables.keys()) if isinstance(variables, Dict) else variables # type: Iterable[str]
for key in keys:
variable = self._project.variables.get(key)
if isinstance(variables, Dict):
if variables[key] != variable.value:
continue
variable.delete()
def set(self, variables: Dict[str, str]):
"""
Sets the build variables (i.e. removes old ones, adds new ones)
:param variables: the build variables to set
"""
current_variables = self.get()
difference = DictDiffer(variables, current_variables)
removed_keys = difference.removed()
self.remove(removed_keys)
changed_keys = difference.added() | difference.changed()
changed = {key: variables[key] for key in changed_keys}
self.add(changed, overwrite=True)
def add(self, variables: Dict[str, str], overwrite: bool=False):
"""
Adds the given build variables to those that already exist.
:param variables: the build variables to add
:param overwrite: whether the old variable should be overwritten in the case of a redefinition
"""
preset_variables = self._project.variables.list()
preset_variable_keys = [variable.key for variable in preset_variables]
for key, value in variables.items():
if key in preset_variable_keys:
if overwrite:
variable = preset_variables[preset_variable_keys.index(key)]
variable.value = value
variable.save()
else:
variable = self._project.variables.create({
_VARIABLE_KEY_PROPERTY: key, _VARIABLE_VALUE_PROPERTY: value})
variable.save()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/manager.py
|
ProjectVariablesManager.clear
|
python
|
def clear(self):
    """
    Clears all of the build variables.
    """
    # `all=True` ensures variables beyond the first page are deleted too.
    all_variables = self._project.variables.list(all=True)
    for build_variable in all_variables:
        build_variable.delete()
|
Clears all of the build variables.
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/manager.py#L51-L56
| null |
class ProjectVariablesManager:
"""
Manages the build variables used by a project.
"""
def __init__(self, gitlab_config: GitLabConfig, project: str):
"""
Constructor.
:param gitlab_config: configuration to access GitLab
:param project: the project of interest (preferably namespaced, e.g. "hgi/my-project")
"""
self._connector = Gitlab(gitlab_config.location, gitlab_config.token, ssl_verify=SSL_VERIFY)
self._connector.auth()
try:
self._project = self._connector.projects.get(project)
except GitlabGetError as e:
if "Project Not Found" in e.error_message:
raise ValueError("Project '%s' not found. Valid projects are: %s"
% (project,
[project.path_with_namespace for project in self._connector.projects.list()]))
def get(self) -> Dict[str, str]:
"""
Gets the build variables for the project.
:return: the build variables
"""
variables = self._project.variables.list(all=True)
return {variable.key: variable.value for variable in variables}
def remove(self, variables: Union[Iterable[str], Dict[str, str]]=None):
"""
Removes the given variables. Will only remove a key if it has the given value if the value has been defined.
:param variables: the variables to remove
"""
keys = list(variables.keys()) if isinstance(variables, Dict) else variables # type: Iterable[str]
for key in keys:
variable = self._project.variables.get(key)
if isinstance(variables, Dict):
if variables[key] != variable.value:
continue
variable.delete()
def set(self, variables: Dict[str, str]):
"""
Sets the build variables (i.e. removes old ones, adds new ones)
:param variables: the build variables to set
"""
current_variables = self.get()
difference = DictDiffer(variables, current_variables)
removed_keys = difference.removed()
self.remove(removed_keys)
changed_keys = difference.added() | difference.changed()
changed = {key: variables[key] for key in changed_keys}
self.add(changed, overwrite=True)
def add(self, variables: Dict[str, str], overwrite: bool=False):
"""
Adds the given build variables to those that already exist.
:param variables: the build variables to add
:param overwrite: whether the old variable should be overwritten in the case of a redefinition
"""
preset_variables = self._project.variables.list()
preset_variable_keys = [variable.key for variable in preset_variables]
for key, value in variables.items():
if key in preset_variable_keys:
if overwrite:
variable = preset_variables[preset_variable_keys.index(key)]
variable.value = value
variable.save()
else:
variable = self._project.variables.create({
_VARIABLE_KEY_PROPERTY: key, _VARIABLE_VALUE_PROPERTY: value})
variable.save()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/manager.py
|
ProjectVariablesManager.remove
|
python
|
def remove(self, variables: Union[Iterable[str], Dict[str, str]]=None):
    """
    Removes the given variables. Will only remove a key if it has the given value if the value has been defined.
    :param variables: the variables to remove
    """
    values_given = isinstance(variables, Dict)
    keys = list(variables.keys()) if values_given else variables  # type: Iterable[str]
    for key in keys:
        remote_variable = self._project.variables.get(key)
        if values_given and variables[key] != remote_variable.value:
            # An expected value was specified but does not match - leave it alone.
            continue
        remote_variable.delete()
|
Removes the given variables. Will only remove a key if it has the given value if the value has been defined.
:param variables: the variables to remove
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/manager.py#L58-L69
| null |
class ProjectVariablesManager:
"""
Manages the build variables used by a project.
"""
def __init__(self, gitlab_config: GitLabConfig, project: str):
"""
Constructor.
:param gitlab_config: configuration to access GitLab
:param project: the project of interest (preferably namespaced, e.g. "hgi/my-project")
"""
self._connector = Gitlab(gitlab_config.location, gitlab_config.token, ssl_verify=SSL_VERIFY)
self._connector.auth()
try:
self._project = self._connector.projects.get(project)
except GitlabGetError as e:
if "Project Not Found" in e.error_message:
raise ValueError("Project '%s' not found. Valid projects are: %s"
% (project,
[project.path_with_namespace for project in self._connector.projects.list()]))
def get(self) -> Dict[str, str]:
"""
Gets the build variables for the project.
:return: the build variables
"""
variables = self._project.variables.list(all=True)
return {variable.key: variable.value for variable in variables}
def clear(self):
"""
Clears all of the build variables.
"""
for variable in self._project.variables.list(all=True):
variable.delete()
def set(self, variables: Dict[str, str]):
"""
Sets the build variables (i.e. removes old ones, adds new ones)
:param variables: the build variables to set
"""
current_variables = self.get()
difference = DictDiffer(variables, current_variables)
removed_keys = difference.removed()
self.remove(removed_keys)
changed_keys = difference.added() | difference.changed()
changed = {key: variables[key] for key in changed_keys}
self.add(changed, overwrite=True)
def add(self, variables: Dict[str, str], overwrite: bool=False):
"""
Adds the given build variables to those that already exist.
:param variables: the build variables to add
:param overwrite: whether the old variable should be overwritten in the case of a redefinition
"""
preset_variables = self._project.variables.list()
preset_variable_keys = [variable.key for variable in preset_variables]
for key, value in variables.items():
if key in preset_variable_keys:
if overwrite:
variable = preset_variables[preset_variable_keys.index(key)]
variable.value = value
variable.save()
else:
variable = self._project.variables.create({
_VARIABLE_KEY_PROPERTY: key, _VARIABLE_VALUE_PROPERTY: value})
variable.save()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/manager.py
|
ProjectVariablesManager.set
|
python
|
def set(self, variables: Dict[str, str]):
    """
    Sets the build variables (i.e. removes old ones, adds new ones)
    :param variables: the build variables to set
    """
    difference = DictDiffer(variables, self.get())
    # Delete variables that are no longer wanted.
    self.remove(difference.removed())
    # Create or update everything that is new or whose value has changed.
    keys_to_update = difference.added() | difference.changed()
    self.add({key: variables[key] for key in keys_to_update}, overwrite=True)
|
Sets the build variables (i.e. removes old ones, adds new ones)
:param variables: the build variables to set
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/manager.py#L71-L84
|
[
"def get(self) -> Dict[str, str]:\n \"\"\"\n Gets the build variables for the project.\n :return: the build variables\n \"\"\"\n variables = self._project.variables.list(all=True)\n return {variable.key: variable.value for variable in variables}\n",
"def remove(self, variables: Union[Iterable[str], Dict[str, str]]=None):\n \"\"\"\n Removes the given variables. Will only remove a key if it has the given value if the value has been defined.\n :param variables: the variables to remove\n \"\"\"\n keys = list(variables.keys()) if isinstance(variables, Dict) else variables # type: Iterable[str]\n for key in keys:\n variable = self._project.variables.get(key)\n if isinstance(variables, Dict):\n if variables[key] != variable.value:\n continue\n variable.delete()\n",
"def add(self, variables: Dict[str, str], overwrite: bool=False):\n \"\"\"\n Adds the given build variables to those that already exist.\n :param variables: the build variables to add\n :param overwrite: whether the old variable should be overwritten in the case of a redefinition\n \"\"\"\n preset_variables = self._project.variables.list()\n preset_variable_keys = [variable.key for variable in preset_variables]\n\n for key, value in variables.items():\n if key in preset_variable_keys:\n if overwrite:\n variable = preset_variables[preset_variable_keys.index(key)]\n variable.value = value\n variable.save()\n else:\n variable = self._project.variables.create({\n _VARIABLE_KEY_PROPERTY: key, _VARIABLE_VALUE_PROPERTY: value})\n variable.save()\n"
] |
class ProjectVariablesManager:
"""
Manages the build variables used by a project.
"""
def __init__(self, gitlab_config: GitLabConfig, project: str):
"""
Constructor.
:param gitlab_config: configuration to access GitLab
:param project: the project of interest (preferably namespaced, e.g. "hgi/my-project")
"""
self._connector = Gitlab(gitlab_config.location, gitlab_config.token, ssl_verify=SSL_VERIFY)
self._connector.auth()
try:
self._project = self._connector.projects.get(project)
except GitlabGetError as e:
if "Project Not Found" in e.error_message:
raise ValueError("Project '%s' not found. Valid projects are: %s"
% (project,
[project.path_with_namespace for project in self._connector.projects.list()]))
def get(self) -> Dict[str, str]:
"""
Gets the build variables for the project.
:return: the build variables
"""
variables = self._project.variables.list(all=True)
return {variable.key: variable.value for variable in variables}
def clear(self):
"""
Clears all of the build variables.
"""
for variable in self._project.variables.list(all=True):
variable.delete()
def remove(self, variables: Union[Iterable[str], Dict[str, str]]=None):
"""
Removes the given variables. Will only remove a key if it has the given value if the value has been defined.
:param variables: the variables to remove
"""
keys = list(variables.keys()) if isinstance(variables, Dict) else variables # type: Iterable[str]
for key in keys:
variable = self._project.variables.get(key)
if isinstance(variables, Dict):
if variables[key] != variable.value:
continue
variable.delete()
def add(self, variables: Dict[str, str], overwrite: bool=False):
"""
Adds the given build variables to those that already exist.
:param variables: the build variables to add
:param overwrite: whether the old variable should be overwritten in the case of a redefinition
"""
preset_variables = self._project.variables.list()
preset_variable_keys = [variable.key for variable in preset_variables]
for key, value in variables.items():
if key in preset_variable_keys:
if overwrite:
variable = preset_variables[preset_variable_keys.index(key)]
variable.value = value
variable.save()
else:
variable = self._project.variables.create({
_VARIABLE_KEY_PROPERTY: key, _VARIABLE_VALUE_PROPERTY: value})
variable.save()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/manager.py
|
ProjectVariablesManager.add
|
python
|
def add(self, variables: Dict[str, str], overwrite: bool=False):
    """
    Adds the given build variables to those that already exist.
    :param variables: the build variables to add
    :param overwrite: whether the old variable should be overwritten in the case of a redefinition
    """
    # `all=True` is required (and is what `get`/`clear` already use) so variables
    # beyond the first page of results are seen - otherwise an existing variable
    # could wrongly be treated as new and re-created.
    preset_variables = self._project.variables.list(all=True)
    preset_variable_keys = [variable.key for variable in preset_variables]
    for key, value in variables.items():
        if key in preset_variable_keys:
            if overwrite:
                variable = preset_variables[preset_variable_keys.index(key)]
                variable.value = value
                variable.save()
        else:
            variable = self._project.variables.create({
                _VARIABLE_KEY_PROPERTY: key, _VARIABLE_VALUE_PROPERTY: value})
            variable.save()
|
Adds the given build variables to those that already exist.
:param variables: the build variables to add
:param overwrite: whether the old variable should be overwritten in the case of a redefinition
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/manager.py#L86-L104
| null |
class ProjectVariablesManager:
"""
Manages the build variables used by a project.
"""
def __init__(self, gitlab_config: GitLabConfig, project: str):
"""
Constructor.
:param gitlab_config: configuration to access GitLab
:param project: the project of interest (preferably namespaced, e.g. "hgi/my-project")
"""
self._connector = Gitlab(gitlab_config.location, gitlab_config.token, ssl_verify=SSL_VERIFY)
self._connector.auth()
try:
self._project = self._connector.projects.get(project)
except GitlabGetError as e:
if "Project Not Found" in e.error_message:
raise ValueError("Project '%s' not found. Valid projects are: %s"
% (project,
[project.path_with_namespace for project in self._connector.projects.list()]))
def get(self) -> Dict[str, str]:
"""
Gets the build variables for the project.
:return: the build variables
"""
variables = self._project.variables.list(all=True)
return {variable.key: variable.value for variable in variables}
def clear(self):
"""
Clears all of the build variables.
"""
for variable in self._project.variables.list(all=True):
variable.delete()
def remove(self, variables: Union[Iterable[str], Dict[str, str]]=None):
"""
Removes the given variables. Will only remove a key if it has the given value if the value has been defined.
:param variables: the variables to remove
"""
keys = list(variables.keys()) if isinstance(variables, Dict) else variables # type: Iterable[str]
for key in keys:
variable = self._project.variables.get(key)
if isinstance(variables, Dict):
if variables[key] != variable.value:
continue
variable.delete()
def set(self, variables: Dict[str, str]):
"""
Sets the build variables (i.e. removes old ones, adds new ones)
:param variables: the build variables to set
"""
current_variables = self.get()
difference = DictDiffer(variables, current_variables)
removed_keys = difference.removed()
self.remove(removed_keys)
changed_keys = difference.added() | difference.changed()
changed = {key: variables[key] for key in changed_keys}
self.add(changed, overwrite=True)
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/executables/gitlab_set_variables.py
|
_parse_args
|
python
|
def _parse_args(args: List[str]) -> _SetArgumentsRunConfig:
    """
    Parses the given CLI arguments to get a run configuration.
    :param args: CLI arguments
    :return: run configuration derived from the given CLI arguments
    """
    parser = argparse.ArgumentParser(
        prog="gitlab-set-variables", description="Tool for setting a GitLab project's build variables")
    # Shared flags (--url, --token, --debug) plus the positional project argument.
    add_common_arguments(parser, project=True)
    parser.add_argument("source", nargs="+", type=str,
                        help="File to source build variables from. Can be a ini file, JSON file or a shell script "
                             "containing 'export' statements")
    parsed = parser.parse_args(args)
    return _SetArgumentsRunConfig(
        parsed.source, parsed.project, parsed.url, parsed.token, parsed.debug)
|
Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/gitlab_set_variables.py#L21-L35
|
[
"def add_common_arguments(parser: ArgumentParser, project: bool=False):\n \"\"\"\n Adds common arguments to the given argument parser.\n :param parser: argument parser\n :param url: whether the URL named argument should be added\n :param token: whether the access token named argument should be added\n :param project: whether the project positional argument should be added\n \"\"\"\n parser.add_argument(\"--url\", type=str, help=\"Location of GitLab\")\n parser.add_argument(\"--token\", type=str, help=\"GitLab access token\")\n parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"Turns on debugging\")\n if project:\n parser.add_argument(\"project\", type=str, help=\"The GitLab project to set the build variables for\")\n"
] |
import argparse
import sys
from typing import List, Dict
from gitlabbuildvariables.common import GitLabConfig
from gitlabbuildvariables.executables._common import add_common_arguments, ProjectRunConfig
from gitlabbuildvariables.manager import ProjectVariablesManager
from gitlabbuildvariables.reader import read_variables
class _SetArgumentsRunConfig(ProjectRunConfig):
"""
Run configuration for setting arguments.
"""
def __init__(self, source: List[str], *args, **kwargs):
super().__init__(*args, **kwargs)
self.source = source
def main():
"""
Main method.
"""
run_config = _parse_args(sys.argv[1:])
gitlab_config = GitLabConfig(run_config.url, run_config.token)
manager = ProjectVariablesManager(gitlab_config, run_config.project)
variables = {} # type: Dict[str, str]
for source in run_config.source:
variables.update(read_variables(source))
manager.set(variables)
print("Variables for project \"%s\" set to: %s" % (run_config.project, manager.get()))
if __name__ == "__main__":
main()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/executables/gitlab_set_variables.py
|
main
|
python
|
def main():
"""
Main method: reads build variables from each source file and sets them on the configured project.
"""
run_config = _parse_args(sys.argv[1:])
gitlab_config = GitLabConfig(run_config.url, run_config.token)
manager = ProjectVariablesManager(gitlab_config, run_config.project)
variables = {} # type: Dict[str, str]
# Later sources override earlier ones when the same variable name appears twice.
for source in run_config.source:
variables.update(read_variables(source))
manager.set(variables)
print("Variables for project \"%s\" set to: %s" % (run_config.project, manager.get()))
|
Main method.
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/gitlab_set_variables.py#L38-L49
|
[
"def read_variables(config_location: str) -> Dict[str, str]:\n \"\"\"\n Reads variables out of a config file. Variables can be in a ini file, a shell file used to source the variables\n (i.e. one that has just got \"export *\" like statements in it) or in JSON.\n :param config_location: the location of the config file\n :return: dictionary where the variable names are key and their values are the values\n \"\"\"\n with open(config_location, \"r\") as config_file:\n config_lines = config_file.readlines()\n try:\n return json.loads(\"\".join(config_lines), parse_int=lambda num_str: str(num_str),\n parse_float=lambda float_str: str(float_str))\n except JSONDecodeError:\n pass\n config_lines = _shell_to_ini(config_lines)\n return _read_ini_config(\"\\n\".join(config_lines))\n",
"def _parse_args(args: List[str]) -> _SetArgumentsRunConfig:\n \"\"\"\n Parses the given CLI arguments to get a run configuration.\n :param args: CLI arguments\n :return: run configuration derived from the given CLI arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n prog=\"gitlab-set-variables\", description=\"Tool for setting a GitLab project's build variables\")\n add_common_arguments(parser, project=True)\n parser.add_argument(\"source\", nargs=\"+\", type=str,\n help=\"File to source build variables from. Can be a ini file, JSON file or a shell script \"\n \"containing 'export' statements\")\n\n arguments = parser.parse_args(args)\n return _SetArgumentsRunConfig(arguments.source, arguments.project, arguments.url, arguments.token, arguments.debug)\n",
"def get(self) -> Dict[str, str]:\n \"\"\"\n Gets the build variables for the project.\n :return: the build variables\n \"\"\"\n variables = self._project.variables.list(all=True)\n return {variable.key: variable.value for variable in variables}\n",
"def set(self, variables: Dict[str, str]):\n \"\"\"\n Sets the build variables (i.e. removes old ones, adds new ones)\n :param variables: the build variables to set\n \"\"\"\n current_variables = self.get()\n difference = DictDiffer(variables, current_variables)\n\n removed_keys = difference.removed()\n self.remove(removed_keys)\n\n changed_keys = difference.added() | difference.changed()\n changed = {key: variables[key] for key in changed_keys}\n self.add(changed, overwrite=True)\n"
] |
import argparse
import sys
from typing import List, Dict
from gitlabbuildvariables.common import GitLabConfig
from gitlabbuildvariables.executables._common import add_common_arguments, ProjectRunConfig
from gitlabbuildvariables.manager import ProjectVariablesManager
from gitlabbuildvariables.reader import read_variables
class _SetArgumentsRunConfig(ProjectRunConfig):
"""
Run configuration for setting arguments.
"""
def __init__(self, source: List[str], *args, **kwargs):
super().__init__(*args, **kwargs)
self.source = source
def _parse_args(args: List[str]) -> _SetArgumentsRunConfig:
"""
Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments
"""
parser = argparse.ArgumentParser(
prog="gitlab-set-variables", description="Tool for setting a GitLab project's build variables")
add_common_arguments(parser, project=True)
parser.add_argument("source", nargs="+", type=str,
help="File to source build variables from. Can be a ini file, JSON file or a shell script "
"containing 'export' statements")
arguments = parser.parse_args(args)
return _SetArgumentsRunConfig(arguments.source, arguments.project, arguments.url, arguments.token, arguments.debug)
if __name__ == "__main__":
main()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/update/_builders.py
|
ProjectVariablesUpdaterBuilder.build
|
python
|
def build(self, project: str, groups: Iterable[str], gitlab_config: GitLabConfig) \
-> ProjectVariablesUpdaterType:
"""
Builds a `ProjectVariablesUpdater` instance using the given arguments.
:param project: the project that variables are to be updated for
:param groups: the groups of settings that should be set for the project
:param gitlab_config: the configuration required to access GitLab
:return: the project variable updater
"""
|
Builds a `ProjectVariablesUpdater` instance using the given arguments.
:param project: the project that variables are to be updated for
:param groups: the groups of settings that should be set for the project
:param gitlab_config: the configuration required to access GitLab
:return: the project variable updater
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/update/_builders.py#L16-L24
| null |
class ProjectVariablesUpdaterBuilder(Generic[ProjectVariablesUpdaterType], metaclass=ABCMeta):
"""
Builder of `ProjectVariablesUpdater` instances.
"""
@abstractmethod
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/update/_single_project_updaters.py
|
ProjectVariablesUpdater._get_variables
|
python
|
def _get_variables(self) -> Dict[str, str]:
variables = {} # type: Dict[str, str]
for group in self.groups:
setting_variables = self._read_group_variables(group)
variables.update(setting_variables)
return variables
|
Gets the variables that should be set for this project.
:return: the variables
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/update/_single_project_updaters.py#L46-L55
|
[
"def _read_group_variables(self, group: str) -> Dict[str, str]:\n \"\"\"\n Reads the setting variables associated to the given group identifier.\n :param group: the identifier of the group\n :return: the setting variables associated to the given group\n \"\"\"\n"
] |
class ProjectVariablesUpdater(VariablesUpdater, metaclass=ABCMeta):
"""
Updates variables for a project in GitLab CI.
"""
@abstractmethod
def _read_group_variables(self, group: str) -> Dict[str, str]:
"""
Reads the setting variables associated to the given group identifier.
:param group: the identifier of the group
:return: the setting variables associated to the given group
"""
def __init__(self, project: str, groups: Iterable[str], **kwargs):
"""
Constructor.
:param project: name or ID of the project to update variables for
:param groups: lgroups of settings variables that are to be set (lowest preference first)
:param kwargs: named arguments required in `VariablesUpdater` constructor
"""
super().__init__(**kwargs)
self.project = project
self.groups = groups
self._variables_manager = ProjectVariablesManager(self.gitlab_config, project)
def update(self):
variables = self._get_variables()
self._variables_manager.set(variables)
logger.info("Set variables for \"%s\": %s" % (self.project, variables))
def update_required(self) -> bool:
return self._variables_manager.get() != self._get_variables()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/update/_single_project_updaters.py
|
FileBasedProjectVariablesUpdater._resolve_group_location
|
python
|
def _resolve_group_location(self, group: str) -> str:
if os.path.isabs(group):
possible_paths = [group]
else:
possible_paths = []
for repository in self.setting_repositories:
possible_paths.append(os.path.join(repository, group))
for default_setting_extension in self.default_setting_extensions:
number_of_paths = len(possible_paths)
for i in range(number_of_paths):
path_with_extension = "%s.%s" % (possible_paths[i], default_setting_extension)
possible_paths.append(path_with_extension)
for path in possible_paths:
if os.path.exists(path):
return path
raise ValueError("Could not resolve location of settings identified by: \"%s\"" % group)
|
Resolves the location of a setting file based on the given identifier.
:param group: the identifier for the group's settings file (~its location)
:return: the absolute path of the settings location
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/update/_single_project_updaters.py#L78-L100
| null |
class FileBasedProjectVariablesUpdater(ProjectVariablesUpdater):
"""
Updates variables for a project in GitLab CI based on the values stored within a file.
"""
def __init__(self, setting_repositories: List[str]=None, default_setting_extensions: List[str]=None, **kwargs):
"""
Constructor.
:param setting_repositories: directories that may contain variable source files (highest preference first)
:param default_setting_extensions: file extensions that variable source files could have if that given is not
found(highest preference first, e.g. ["json", "init"])
:param kwargs: named arguments required for `ProjectVariablesUpdater`
"""
super().__init__(**kwargs)
self.setting_repositories = setting_repositories if setting_repositories is not None else []
self.default_setting_extensions = default_setting_extensions if default_setting_extensions is not None else []
def _read_group_variables(self, group: str) -> Dict[str, str]:
setting_location = self._resolve_group_location(group)
return read_variables(setting_location)
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/executables/_common.py
|
add_common_arguments
|
python
|
def add_common_arguments(parser: ArgumentParser, project: bool=False):
    """
    Adds the GitLab CLI arguments shared by the executables to the parser.

    Always adds the optional ``--url``, ``--token`` and ``--debug`` arguments;
    the positional ``project`` argument is added only when requested.
    :param parser: argument parser to extend
    :param project: whether the project positional argument should be added
    """
    parser.add_argument("--url", type=str, help="Location of GitLab")
    parser.add_argument("--token", type=str, help="GitLab access token")
    parser.add_argument("--debug", action="store_true", default=False, help="Turns on debugging")
    if project:
        parser.add_argument("project", type=str, help="The GitLab project to set the build variables for")
|
Adds common arguments to the given argument parser.
:param parser: argument parser
:param url: whether the URL named argument should be added
:param token: whether the access token named argument should be added
:param project: whether the project positional argument should be added
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/_common.py#L23-L35
| null |
from argparse import ArgumentParser
class RunConfig:
    """
    Run configuration for use against GitLab.
    """
    def __init__(self, url: str, token: str, debug: bool=False):
        # Location (URL) of the GitLab instance.
        self.url = url
        # GitLab access token used to authenticate.
        self.token = token
        # When True, executables turn on debug-level logging.
        self.debug = debug
class ProjectRunConfig(RunConfig):
"""
Run configuration for use against GitLab for a particular project.
"""
def __init__(self, project: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.project = project
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/executables/gitlab_update_variables.py
|
_parse_args
|
python
|
def _parse_args(args: List[str]) -> _UpdateArgumentsRunConfig:
parser = argparse.ArgumentParser(
prog="gitlab-update-variables", description="Tool for setting a GitLab project's build variables")
add_common_arguments(parser)
parser.add_argument("config_location", type=str, help="Location of the configuration file")
parser.add_argument("--setting-repository", dest="setting_repository", nargs="+", type=str,
help="Directory from which variable settings groups may be sourced")
parser.add_argument("--default-setting-extension", dest="default_setting_extensions",nargs="+", type=str,
help="Extensions to try adding to the variable to source location if it does not exist")
arguments = parser.parse_args(args)
return _UpdateArgumentsRunConfig(
arguments.config_location, arguments.setting_repository, arguments.default_setting_extensions,
url=arguments.url, token=arguments.token, debug=arguments.debug)
|
Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/gitlab_update_variables.py#L24-L42
|
[
"def add_common_arguments(parser: ArgumentParser, project: bool=False):\n \"\"\"\n Adds common arguments to the given argument parser.\n :param parser: argument parser\n :param url: whether the URL named argument should be added\n :param token: whether the access token named argument should be added\n :param project: whether the project positional argument should be added\n \"\"\"\n parser.add_argument(\"--url\", type=str, help=\"Location of GitLab\")\n parser.add_argument(\"--token\", type=str, help=\"GitLab access token\")\n parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"Turns on debugging\")\n if project:\n parser.add_argument(\"project\", type=str, help=\"The GitLab project to set the build variables for\")\n"
] |
import argparse
import logging
import sys
from typing import List
from gitlabbuildvariables.common import GitLabConfig
from gitlabbuildvariables.executables._common import add_common_arguments, RunConfig
from gitlabbuildvariables.update import logger, FileBasedProjectVariablesUpdaterBuilder, \
FileBasedProjectsVariablesUpdater
class _UpdateArgumentsRunConfig(RunConfig):
"""
Run configuration for setting arguments.
"""
def __init__(self, config_location: str, setting_repositories: List[str],
default_setting_extensions: List[str], *args, **kwargs):
super().__init__(*args, **kwargs)
self.config_location = config_location
self.setting_repositories = setting_repositories
self.default_setting_extensions = default_setting_extensions
def main():
"""
Main method.
"""
run_config = _parse_args(sys.argv[1:])
if run_config.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
gitlab_config = GitLabConfig(run_config.url, run_config.token)
project_updater_builder = FileBasedProjectVariablesUpdaterBuilder(
setting_repositories=run_config.setting_repositories,
default_setting_extensions=run_config.default_setting_extensions)
updater = FileBasedProjectsVariablesUpdater(config_location=run_config.config_location, gitlab_config=gitlab_config,
project_variables_updater_builder=project_updater_builder)
updater.update()
if __name__ == "__main__":
main()
|
wtsi-hgi/gitlab-build-variables
|
gitlabbuildvariables/executables/gitlab_update_variables.py
|
main
|
python
|
def main():
run_config = _parse_args(sys.argv[1:])
if run_config.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
gitlab_config = GitLabConfig(run_config.url, run_config.token)
project_updater_builder = FileBasedProjectVariablesUpdaterBuilder(
setting_repositories=run_config.setting_repositories,
default_setting_extensions=run_config.default_setting_extensions)
updater = FileBasedProjectsVariablesUpdater(config_location=run_config.config_location, gitlab_config=gitlab_config,
project_variables_updater_builder=project_updater_builder)
updater.update()
|
Main method.
|
train
|
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/executables/gitlab_update_variables.py#L45-L62
|
[
"def _parse_args(args: List[str]) -> _UpdateArgumentsRunConfig:\n \"\"\"\n Parses the given CLI arguments to get a run configuration.\n :param args: CLI arguments\n :return: run configuration derived from the given CLI arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n prog=\"gitlab-update-variables\", description=\"Tool for setting a GitLab project's build variables\")\n add_common_arguments(parser)\n parser.add_argument(\"config_location\", type=str, help=\"Location of the configuration file\")\n parser.add_argument(\"--setting-repository\", dest=\"setting_repository\", nargs=\"+\", type=str,\n help=\"Directory from which variable settings groups may be sourced\")\n parser.add_argument(\"--default-setting-extension\", dest=\"default_setting_extensions\",nargs=\"+\", type=str,\n help=\"Extensions to try adding to the variable to source location if it does not exist\")\n\n arguments = parser.parse_args(args)\n return _UpdateArgumentsRunConfig(\n arguments.config_location, arguments.setting_repository, arguments.default_setting_extensions,\n url=arguments.url, token=arguments.token, debug=arguments.debug)\n",
"def update(self):\n for project, settings_group in self._get_projects_and_settings_groups():\n project_updater = self.project_variables_updater_builder.build(\n project=project, groups=settings_group, gitlab_config=self.gitlab_config)\n project_updater.update()\n"
] |
import argparse
import logging
import sys
from typing import List
from gitlabbuildvariables.common import GitLabConfig
from gitlabbuildvariables.executables._common import add_common_arguments, RunConfig
from gitlabbuildvariables.update import logger, FileBasedProjectVariablesUpdaterBuilder, \
FileBasedProjectsVariablesUpdater
class _UpdateArgumentsRunConfig(RunConfig):
"""
Run configuration for setting arguments.
"""
def __init__(self, config_location: str, setting_repositories: List[str],
default_setting_extensions: List[str], *args, **kwargs):
super().__init__(*args, **kwargs)
self.config_location = config_location
self.setting_repositories = setting_repositories
self.default_setting_extensions = default_setting_extensions
def _parse_args(args: List[str]) -> _UpdateArgumentsRunConfig:
"""
Parses the given CLI arguments to get a run configuration.
:param args: CLI arguments
:return: run configuration derived from the given CLI arguments
"""
parser = argparse.ArgumentParser(
prog="gitlab-update-variables", description="Tool for setting a GitLab project's build variables")
add_common_arguments(parser)
parser.add_argument("config_location", type=str, help="Location of the configuration file")
parser.add_argument("--setting-repository", dest="setting_repository", nargs="+", type=str,
help="Directory from which variable settings groups may be sourced")
parser.add_argument("--default-setting-extension", dest="default_setting_extensions",nargs="+", type=str,
help="Extensions to try adding to the variable to source location if it does not exist")
arguments = parser.parse_args(args)
return _UpdateArgumentsRunConfig(
arguments.config_location, arguments.setting_repository, arguments.default_setting_extensions,
url=arguments.url, token=arguments.token, debug=arguments.debug)
if __name__ == "__main__":
main()
|
Vito2015/pyextend
|
pyextend/formula/lbstools.py
|
haversine
|
python
|
def haversine(lng1, lat1, lng2, lat2):
    """Great-circle distance in km between two (lng, lat) points.

    Applies the haversine formula on a sphere of radius 6367 km; inputs may
    be numbers or numeric strings.
    """
    # Coerce to float, then to radians, in one pass.
    rlng1, rlat1, rlng2, rlat2 = [math.radians(float(value))
                                  for value in (lng1, lat1, lng2, lat2)]
    half_dlat = (rlat2 - rlat1) / 2
    half_dlng = (rlng2 - rlng1) / 2
    a = math.sin(half_dlat) ** 2 + math.cos(rlat1) * math.cos(rlat2) * math.sin(half_dlng) ** 2
    return 6367 * 2 * math.asin(math.sqrt(a))
|
Compute km by geo-coordinates
See also: haversine define https://en.wikipedia.org/wiki/Haversine_formula
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/formula/lbstools.py#L13-L29
| null |
# coding: utf-8
"""
pyextend.formula
~~~~~~~~~~~~~~~~
pyextend formula package
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import math
def calc_distance(lng1, lat1, lng2, lat2):
    """Calc distance (km) by geo-coordinates.

    Uses an ellipsoidal correction (equatorial radius 6378.140 km, polar
    radius 6356.755 km) on top of the spherical central angle.

    @:param lng1: first coordinate.lng
    @:param lat1: first coordinate.lat
    @:param lng2: second coordinate.lng
    @:param lat2: second coordinate.lat
    @:return distance: km
    """
    ra = 6378.140  # equatorial radius (km)
    rb = 6356.755  # polar radius (km)
    flatten = (ra - rb) / ra  # flattening of the Earth
    rad_lat_1 = math.radians(lat1)
    rad_lng_1 = math.radians(lng1)
    rad_lat_2 = math.radians(lat2)
    rad_lng_2 = math.radians(lng2)
    p1 = math.atan(rb / ra * math.tan(rad_lat_1))
    p2 = math.atan(rb / ra * math.tan(rad_lat_2))
    # Clamp the cosine into [-1, 1]: float rounding can push it marginally
    # outside math.acos' domain for (nearly) identical or antipodal points.
    cos_xx = math.sin(p1) * math.sin(p2) + math.cos(p1) * math.cos(p2) * math.cos(rad_lng_1 - rad_lng_2)
    xx = math.acos(max(-1.0, min(1.0, cos_xx)))
    if xx == 0.0:
        # Identical points: the c2 term below divides by sin(xx / 2) ** 2,
        # which would raise ZeroDivisionError; the distance is simply zero.
        return 0.0
    c1 = (math.sin(xx) - xx) * (math.sin(p1) + math.sin(p2)) ** 2 / math.cos(xx / 2) ** 2
    c2 = (math.sin(xx) + xx) * (math.sin(p1) - math.sin(p2)) ** 2 / math.sin(xx / 2) ** 2
    dr = flatten / 8 * (c1 - c2)
    distance = ra * (xx + dr)
    return distance
|
Vito2015/pyextend
|
pyextend/formula/lbstools.py
|
calc_distance
|
python
|
def calc_distance(lng1, lat1, lng2, lat2):
    """Distance in km between two (lng, lat) coordinates.

    Uses an ellipsoidal correction with equatorial radius 6378.140 km and
    polar radius 6356.755 km.

    NOTE(review): when both points are identical, ``xx`` is 0 and the ``c2``
    term divides by ``sin(xx / 2) ** 2 == 0``, raising ZeroDivisionError —
    callers should guard against equal inputs; confirm intended behaviour.
    """
    ra = 6378.140  # equatorial radius (km)
    rb = 6356.755  # polar radius (km)
    flatten = (ra - rb) / ra  # flattening of the Earth
    rad_lat_1 = math.radians(lat1)
    rad_lng_1 = math.radians(lng1)
    rad_lat_2 = math.radians(lat2)
    rad_lng_2 = math.radians(lng2)
    p1 = math.atan(rb / ra * math.tan(rad_lat_1))
    p2 = math.atan(rb / ra * math.tan(rad_lat_2))
    # Central angle between the two reduced latitudes.
    xx = math.acos(math.sin(p1) * math.sin(p2) + math.cos(p1) * math.cos(p2) * math.cos(rad_lng_1 - rad_lng_2))
    c1 = (math.sin(xx) - xx) * (math.sin(p1) + math.sin(p2)) ** 2 / math.cos(xx / 2) ** 2
    c2 = (math.sin(xx) + xx) * (math.sin(p1) - math.sin(p2)) ** 2 / math.sin(xx / 2) ** 2
    # Flattening-based correction added to the spherical arc length.
    dr = flatten / 8 * (c1 - c2)
    distance = ra * (xx + dr)
    return distance
|
Calc distance (km) by geo-coordinates.
@:param lng1: first coordinate.lng
@:param lat1: first coordinate.lat
@:param lng2: second coordinate.lng
@:param lat2: second coordinate.lat
@:return distance: km
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/formula/lbstools.py#L32-L54
| null |
# coding: utf-8
"""
pyextend.formula
~~~~~~~~~~~~~~~~
pyextend formula package
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import math
def haversine(lng1, lat1, lng2, lat2):
    """Compute km by geo-coordinates

    Great-circle distance on a sphere of radius 6367 km; inputs are coerced
    to float first, so numeric strings are accepted as well.
    See also: haversine define https://en.wikipedia.org/wiki/Haversine_formula
    """
    # Convert coordinates to floats.
    lng1, lat1, lng2, lat2 = map(float, [lng1, lat1, lng2, lat2])
    # Convert to radians from degrees
    lng1, lat1, lng2, lat2 = map(math.radians, [lng1, lat1, lng2, lat2])
    # Compute distance
    dlng = lng2 - lng1
    dlat = lat2 - lat1
    a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlng/2)**2
    c = 2 * math.asin(math.sqrt(a))
    km = 6367 * c
    return km
|
Vito2015/pyextend
|
pyextend/core/math.py
|
isprime
|
python
|
def isprime(n):
    """Return True when abs(int(n)) is prime, otherwise False."""
    n = abs(int(n))
    if n == 2:
        return True
    if n < 2 or n % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(n).
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
|
Check the number is prime value. if prime value returns True, not False.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/math.py#L12-L28
| null |
# coding: utf-8
"""
pyextend.core.math
~~~~~~~~~~~~~~~~~~
pyextend core math tools.
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
|
Vito2015/pyextend
|
pyextend/core/log.py
|
add_handler
|
python
|
def add_handler(cls, level, fmt, colorful, **kwargs):
    """Create a handler of type ``cls``, configure it and attach it to the
    global logger.

    :param cls: handler class to instantiate (e.g. ``logging.StreamHandler``)
    :param level: logging level as an int or a level-name string
    :param fmt: log record format string
    :param colorful: when True use the module's ``ColoredFormatter``,
        otherwise a plain ``logging.Formatter``
    :param kwargs: forwarded to the handler constructor
    :return: the configured handler, already added to ``g_logger``
    """
    global g_logger
    if isinstance(level, str):
        # Unknown level names fall back to DEBUG.
        level = getattr(logging, level.upper(), logging.DEBUG)
    handler = cls(**kwargs)
    handler.setLevel(level)
    if colorful:
        formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
    else:
        formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    g_logger.addHandler(handler)
    return handler
|
Add a configured handler to the global logger.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L115-L133
| null |
# coding: utf-8
"""
pyextend.core.log
~~~~~~~~~~~~~~~
Implements a simple log library.
This module is a simple encapsulation of logging module to provide a more
convenient interface to write log. The log will both print to stdout and
write to log file. It provides a more flexible way to set the log actions,
and also very simple. See examples showed below:
Example 1: Use default settings
import log
log.debug('hello, world')
log.info('hello, world')
log.error('hello, world')
log.critical('hello, world')
Result:
Print all log messages to file, and only print log whose level is greater
than ERROR to stdout. The log file is located in '/tmp/xxx.log' if the module
name is xxx.py. The default log file handler is size-rotated, if the log
file's size is greater than 20M, then it will be rotated.
Example 2: Use set_logger to change settings
# Change limit size in bytes of default rotating action
log.set_logger(limit = 10240) # 10M
# Use time-rotated file handler, each day has a different log file, see
# logging.handlers.TimedRotatingFileHandler for more help about 'when'
log.set_logger(when = 'D', limit = 1)
# Use normal file handler (not rotated)
log.set_logger(backup_count = 0)
# File log level set to INFO, and stdout log level set to DEBUG
log.set_logger(level = 'DEBUG:INFO')
# Both log level set to INFO
log.set_logger(level = 'INFO')
# Change default log file name and log mode
log.set_logger(filename = 'yyy.log', mode = 'w')
# Change default log formatter
log.set_logger(fmt = '[%(levelname)s] %(message)s'
Notice: Default logger has non-filehandler, if you need log into file, please call:
log.set_logger(filename='filename.log', with_filehandler=True)
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import os
import sys
import logging
import logging.handlers
_logging_funcs = ['debug', 'info', 'warning', 'error', 'critical', 'exception']
__all__ = ['set_logger', 'disable'] + _logging_funcs
# logging levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# Color escape string
COLOR_RED = '\033[1;31m'
COLOR_GREEN = '\033[1;32m'
COLOR_YELLOW = '\033[1;33m'
COLOR_BLUE = '\033[1;34m'
COLOR_PURPLE = '\033[1;35m'
COLOR_CYAN = '\033[1;36m'
COLOR_GRAY = '\033[1;37m'
COLOR_WHITE = '\033[1;38m'
COLOR_RESET = '\033[1;0m' # '\033[1;0m'
# Define log color
LOG_COLORS = {
'DEBUG': COLOR_GRAY + '%s' + COLOR_RESET, # '%s',
'INFO': COLOR_GREEN + '%s' + COLOR_RESET,
'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET,
'ERROR': COLOR_RED + '%s' + COLOR_RESET,
'CRITICAL': COLOR_PURPLE + '%s' + COLOR_RESET,
'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET,
}
# Global logger
g_logger = None
class ColoredFormatter(logging.Formatter):
"""A colorful formatter."""
def __init__(self, fmt=None, datefmt=None):
logging.Formatter.__init__(self, fmt, datefmt)
def format(self, record):
level_name = record.levelname
msg = logging.Formatter.format(self, record)
msg = LOG_COLORS.get(level_name, '%s') % msg
return msg
def add_handler(cls, level, fmt, colorful, **kwargs):
"""Add a configured handler to the global logger."""
global g_logger
if isinstance(level, str):
level = getattr(logging, level.upper(), logging.DEBUG)
handler = cls(**kwargs)
handler.setLevel(level)
if colorful:
formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
else:
formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
g_logger.addHandler(handler)
return handler
def add_streamhandler(level, fmt):
"""Add a stream handler to the global logger."""
return add_handler(logging.StreamHandler, level, fmt, True)
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):
"""Add a file handler to the global logger."""
kwargs = {}
# If the filename is not set, use the default filename
if filename is None:
filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
filename = os.path.basename(filename.replace('.py', '.log'))
filename = os.path.join('/tmp', filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
kwargs['filename'] = filename
# Choose the filehandler based on the passed arguments
if backup_count == 0: # Use FileHandler
cls = logging.FileHandler
kwargs['mode'] = mode
elif when is None: # Use RotatingFileHandler
cls = logging.handlers.RotatingFileHandler
kwargs['maxBytes'] = limit
kwargs['backupCount'] = backup_count
kwargs['mode'] = mode
else: # Use TimedRotatingFileHandler
cls = logging.handlers.TimedRotatingFileHandler
kwargs['when'] = when
kwargs['interval'] = limit
kwargs['backupCount'] = backup_count
return add_handler(cls, level, fmt, False, **kwargs)
def init_logger(name=None):
"""Reload the global logger."""
global g_logger
if g_logger is None:
g_logger = logging.getLogger(name=name)
else:
logging.shutdown()
g_logger.handlers = []
g_logger.setLevel(logging.DEBUG)
def disable(level):
"""Disable all logging calls of severity 'level' and below."""
logging.disable(level)
def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
fmt=
'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
# fmt='[%(levelname)s] %(asctime)s %(message)s',
backup_count=5, limit=20480, when=None, with_filehandler=True):
"""Configure the global logger."""
level = level.split(':')
if len(level) == 1: # Both set to the same level
s_level = f_level = level[0]
else:
s_level = level[0] # StreamHandler log level
f_level = level[1] # FileHandler log level
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
# Import the common log functions for convenient
import_log_funcs()
def import_log_funcs():
"""Import the common log functions from the global logger to the module."""
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func)
# Set a default logger
set_logger(with_filehandler=False)
|
Vito2015/pyextend
|
pyextend/core/log.py
|
add_filehandler
|
python
|
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):
kwargs = {}
# If the filename is not set, use the default filename
if filename is None:
filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
filename = os.path.basename(filename.replace('.py', '.log'))
filename = os.path.join('/tmp', filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
kwargs['filename'] = filename
# Choose the filehandler based on the passed arguments
if backup_count == 0: # Use FileHandler
cls = logging.FileHandler
kwargs['mode'] = mode
elif when is None: # Use RotatingFileHandler
cls = logging.handlers.RotatingFileHandler
kwargs['maxBytes'] = limit
kwargs['backupCount'] = backup_count
kwargs['mode'] = mode
else: # Use TimedRotatingFileHandler
cls = logging.handlers.TimedRotatingFileHandler
kwargs['when'] = when
kwargs['interval'] = limit
kwargs['backupCount'] = backup_count
return add_handler(cls, level, fmt, False, **kwargs)
|
Add a file handler to the global logger.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L141-L171
|
[
"def add_handler(cls, level, fmt, colorful, **kwargs):\n \"\"\"Add a configured handler to the global logger.\"\"\"\n global g_logger\n\n if isinstance(level, str):\n level = getattr(logging, level.upper(), logging.DEBUG)\n\n handler = cls(**kwargs)\n handler.setLevel(level)\n\n if colorful:\n formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')\n else:\n formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')\n\n handler.setFormatter(formatter)\n g_logger.addHandler(handler)\n\n return handler\n"
] |
# coding: utf-8
"""
pyextend.core.log
~~~~~~~~~~~~~~~
Implements a simple log library.
This module is a simple encapsulation of logging module to provide a more
convenient interface to write log. The log will both print to stdout and
write to log file. It provides a more flexible way to set the log actions,
and also very simple. See examples showed below:
Example 1: Use default settings
import log
log.debug('hello, world')
log.info('hello, world')
log.error('hello, world')
log.critical('hello, world')
Result:
Print all log messages to file, and only print log whose level is greater
than ERROR to stdout. The log file is located in '/tmp/xxx.log' if the module
name is xxx.py. The default log file handler is size-rotated, if the log
file's size is greater than 20M, then it will be rotated.
Example 2: Use set_logger to change settings
# Change limit size in bytes of default rotating action
log.set_logger(limit = 10240) # 10M
# Use time-rotated file handler, each day has a different log file, see
# logging.handlers.TimedRotatingFileHandler for more help about 'when'
log.set_logger(when = 'D', limit = 1)
# Use normal file handler (not rotated)
log.set_logger(backup_count = 0)
# File log level set to INFO, and stdout log level set to DEBUG
log.set_logger(level = 'DEBUG:INFO')
# Both log level set to INFO
log.set_logger(level = 'INFO')
# Change default log file name and log mode
log.set_logger(filename = 'yyy.log', mode = 'w')
# Change default log formatter
log.set_logger(fmt = '[%(levelname)s] %(message)s'
Notice: Default logger has non-filehandler, if you need log into file, please call:
log.set_logger(filename='filename.log', with_filehandler=True)
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import os
import sys
import logging
import logging.handlers
_logging_funcs = ['debug', 'info', 'warning', 'error', 'critical', 'exception']
__all__ = ['set_logger', 'disable'] + _logging_funcs
# logging levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# Color escape string
COLOR_RED = '\033[1;31m'
COLOR_GREEN = '\033[1;32m'
COLOR_YELLOW = '\033[1;33m'
COLOR_BLUE = '\033[1;34m'
COLOR_PURPLE = '\033[1;35m'
COLOR_CYAN = '\033[1;36m'
COLOR_GRAY = '\033[1;37m'
COLOR_WHITE = '\033[1;38m'
COLOR_RESET = '\033[1;0m' # '\033[1;0m'
# Define log color
LOG_COLORS = {
'DEBUG': COLOR_GRAY + '%s' + COLOR_RESET, # '%s',
'INFO': COLOR_GREEN + '%s' + COLOR_RESET,
'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET,
'ERROR': COLOR_RED + '%s' + COLOR_RESET,
'CRITICAL': COLOR_PURPLE + '%s' + COLOR_RESET,
'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET,
}
# Global logger
g_logger = None
class ColoredFormatter(logging.Formatter):
"""A colorful formatter."""
def __init__(self, fmt=None, datefmt=None):
logging.Formatter.__init__(self, fmt, datefmt)
def format(self, record):
level_name = record.levelname
msg = logging.Formatter.format(self, record)
msg = LOG_COLORS.get(level_name, '%s') % msg
return msg
def add_handler(cls, level, fmt, colorful, **kwargs):
"""Add a configured handler to the global logger."""
global g_logger
if isinstance(level, str):
level = getattr(logging, level.upper(), logging.DEBUG)
handler = cls(**kwargs)
handler.setLevel(level)
if colorful:
formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
else:
formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
g_logger.addHandler(handler)
return handler
def add_streamhandler(level, fmt):
"""Add a stream handler to the global logger."""
return add_handler(logging.StreamHandler, level, fmt, True)
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):
"""Add a file handler to the global logger."""
kwargs = {}
# If the filename is not set, use the default filename
if filename is None:
filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
filename = os.path.basename(filename.replace('.py', '.log'))
filename = os.path.join('/tmp', filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
kwargs['filename'] = filename
# Choose the filehandler based on the passed arguments
if backup_count == 0: # Use FileHandler
cls = logging.FileHandler
kwargs['mode'] = mode
elif when is None: # Use RotatingFileHandler
cls = logging.handlers.RotatingFileHandler
kwargs['maxBytes'] = limit
kwargs['backupCount'] = backup_count
kwargs['mode'] = mode
else: # Use TimedRotatingFileHandler
cls = logging.handlers.TimedRotatingFileHandler
kwargs['when'] = when
kwargs['interval'] = limit
kwargs['backupCount'] = backup_count
return add_handler(cls, level, fmt, False, **kwargs)
def init_logger(name=None):
"""Reload the global logger."""
global g_logger
if g_logger is None:
g_logger = logging.getLogger(name=name)
else:
logging.shutdown()
g_logger.handlers = []
g_logger.setLevel(logging.DEBUG)
def disable(level):
"""Disable all logging calls of severity 'level' and below."""
logging.disable(level)
def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
fmt=
'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
# fmt='[%(levelname)s] %(asctime)s %(message)s',
backup_count=5, limit=20480, when=None, with_filehandler=True):
"""Configure the global logger."""
level = level.split(':')
if len(level) == 1: # Both set to the same level
s_level = f_level = level[0]
else:
s_level = level[0] # StreamHandler log level
f_level = level[1] # FileHandler log level
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
# Import the common log functions for convenient
import_log_funcs()
def import_log_funcs():
"""Import the common log functions from the global logger to the module."""
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func)
# Set a default logger
set_logger(with_filehandler=False)
|
Vito2015/pyextend
|
pyextend/core/log.py
|
init_logger
|
python
|
def init_logger(name=None):
global g_logger
if g_logger is None:
g_logger = logging.getLogger(name=name)
else:
logging.shutdown()
g_logger.handlers = []
g_logger.setLevel(logging.DEBUG)
|
Reload the global logger.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L174-L184
| null |
# coding: utf-8
"""
pyextend.core.log
~~~~~~~~~~~~~~~
Implements a simple log library.
This module is a simple encapsulation of logging module to provide a more
convenient interface to write log. The log will both print to stdout and
write to log file. It provides a more flexible way to set the log actions,
and also very simple. See examples showed below:
Example 1: Use default settings
import log
log.debug('hello, world')
log.info('hello, world')
log.error('hello, world')
log.critical('hello, world')
Result:
Print all log messages to file, and only print log whose level is greater
than ERROR to stdout. The log file is located in '/tmp/xxx.log' if the module
name is xxx.py. The default log file handler is size-rotated, if the log
file's size is greater than 20M, then it will be rotated.
Example 2: Use set_logger to change settings
# Change limit size in bytes of default rotating action
log.set_logger(limit = 10240) # 10M
# Use time-rotated file handler, each day has a different log file, see
# logging.handlers.TimedRotatingFileHandler for more help about 'when'
log.set_logger(when = 'D', limit = 1)
# Use normal file handler (not rotated)
log.set_logger(backup_count = 0)
# File log level set to INFO, and stdout log level set to DEBUG
log.set_logger(level = 'DEBUG:INFO')
# Both log level set to INFO
log.set_logger(level = 'INFO')
# Change default log file name and log mode
log.set_logger(filename = 'yyy.log', mode = 'w')
# Change default log formatter
log.set_logger(fmt = '[%(levelname)s] %(message)s'
Notice: Default logger has non-filehandler, if you need log into file, please call:
log.set_logger(filename='filename.log', with_filehandler=True)
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import os
import sys
import logging
import logging.handlers
_logging_funcs = ['debug', 'info', 'warning', 'error', 'critical', 'exception']
__all__ = ['set_logger', 'disable'] + _logging_funcs
# logging levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# Color escape string
COLOR_RED = '\033[1;31m'
COLOR_GREEN = '\033[1;32m'
COLOR_YELLOW = '\033[1;33m'
COLOR_BLUE = '\033[1;34m'
COLOR_PURPLE = '\033[1;35m'
COLOR_CYAN = '\033[1;36m'
COLOR_GRAY = '\033[1;37m'
COLOR_WHITE = '\033[1;38m'
COLOR_RESET = '\033[1;0m' # '\033[1;0m'
# Define log color
LOG_COLORS = {
'DEBUG': COLOR_GRAY + '%s' + COLOR_RESET, # '%s',
'INFO': COLOR_GREEN + '%s' + COLOR_RESET,
'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET,
'ERROR': COLOR_RED + '%s' + COLOR_RESET,
'CRITICAL': COLOR_PURPLE + '%s' + COLOR_RESET,
'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET,
}
# Global logger
g_logger = None
class ColoredFormatter(logging.Formatter):
    """Formatter that wraps each formatted record in the ANSI color
    escape sequence registered for its level in LOG_COLORS."""

    def __init__(self, fmt=None, datefmt=None):
        super(ColoredFormatter, self).__init__(fmt, datefmt)

    def format(self, record):
        """Format the record normally, then colorize by level name."""
        plain = logging.Formatter.format(self, record)
        template = LOG_COLORS.get(record.levelname, '%s')
        return template % plain
def add_handler(cls, level, fmt, colorful, **kwargs):
    """Build a handler of type *cls*, attach it to the global logger, return it.

    *level* may be a logging constant or a level name such as ``'INFO'``
    (unknown names fall back to ``logging.DEBUG``).  *colorful* selects
    the ANSI ``ColoredFormatter`` instead of the plain one; *kwargs* are
    forwarded to the handler constructor.
    """
    global g_logger
    # Accept symbolic level names ('info', 'DEBUG', ...) as well as ints.
    if isinstance(level, str):
        level = getattr(logging, level.upper(), logging.DEBUG)
    formatter_cls = ColoredFormatter if colorful else logging.Formatter
    formatter = formatter_cls(fmt, datefmt='%Y-%m-%d %H:%M:%S')
    handler = cls(**kwargs)
    handler.setLevel(level)
    handler.setFormatter(formatter)
    g_logger.addHandler(handler)
    return handler
def add_streamhandler(level, fmt):
    """Attach a colorized StreamHandler at *level* to the global logger."""
    return add_handler(logging.StreamHandler, level, fmt, colorful=True)
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):
    """Attach a file handler to the global logger and return it.

    The concrete handler class is chosen from the rotation arguments:

    * ``backup_count == 0``  -> plain ``FileHandler``
    * ``when is None``       -> size-based ``RotatingFileHandler``
                                (*limit* is the max size in bytes)
    * otherwise              -> ``TimedRotatingFileHandler``
                                (*when*/*limit* are the rotation unit/interval)

    When *filename* is None a default ``/tmp/<main-script>.log`` path is used.
    """
    kwargs = {}

    # Derive a default log path from the entry-point script name.
    if filename is None:
        filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
        filename = os.path.basename(filename.replace('.py', '.log'))
        filename = os.path.join('/tmp', filename)

    log_dir = os.path.dirname(filename)
    # Guard against '' (a bare relative filename has no dirname) and create
    # intermediate directories too -- os.mkdir() would fail on both.
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)

    kwargs['filename'] = filename

    # Choose the file handler class based on the passed arguments.
    if backup_count == 0:  # Use FileHandler
        handler_cls = logging.FileHandler
        kwargs['mode'] = mode
    elif when is None:  # Use RotatingFileHandler
        handler_cls = logging.handlers.RotatingFileHandler
        kwargs['maxBytes'] = limit
        kwargs['backupCount'] = backup_count
        kwargs['mode'] = mode
    else:  # Use TimedRotatingFileHandler
        handler_cls = logging.handlers.TimedRotatingFileHandler
        kwargs['when'] = when
        kwargs['interval'] = limit
        kwargs['backupCount'] = backup_count

    return add_handler(handler_cls, level, fmt, False, **kwargs)
def init_logger(name=None):
    """Create the global logger on first call, or reset its handlers.

    The logger level is always (re)set to DEBUG so handlers decide what
    is actually emitted.
    """
    global g_logger
    if g_logger is not None:
        # Reconfiguring: flush/close existing handlers and drop them.
        logging.shutdown()
        g_logger.handlers = []
    else:
        g_logger = logging.getLogger(name=name)
    g_logger.setLevel(logging.DEBUG)
def disable(level):
    """Globally suppress all log records of severity *level* and below."""
    logging.disable(level)
def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
               fmt='%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
               backup_count=5, limit=20480, when=None, with_filehandler=True):
    """(Re)configure the global logger.

    *level* is either a single level name applied to both handlers or a
    ``'STREAM:FILE'`` pair giving the stream and file levels separately.
    A file handler is only attached when *with_filehandler* is true.
    """
    # Split 'STREAM:FILE' into per-handler levels; a single value is
    # applied to both handlers.
    parts = level.split(':')
    s_level = parts[0]
    f_level = parts[1] if len(parts) > 1 else parts[0]
    init_logger(name=name)
    add_streamhandler(s_level, fmt)
    if with_filehandler:
        add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
    # Re-export debug/info/... from the fresh logger onto this module.
    import_log_funcs()
def import_log_funcs():
    """Expose the logger's debug/info/... methods as module-level functions."""
    global g_logger
    this_module = sys.modules[__name__]
    for func_name in _logging_funcs:
        setattr(this_module, func_name, getattr(g_logger, func_name))
# Set a default logger
set_logger(with_filehandler=False)
|
Vito2015/pyextend
|
pyextend/core/log.py
|
set_logger
|
python
|
def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
fmt=
'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
# fmt='[%(levelname)s] %(asctime)s %(message)s',
backup_count=5, limit=20480, when=None, with_filehandler=True):
level = level.split(':')
if len(level) == 1: # Both set to the same level
s_level = f_level = level[0]
else:
s_level = level[0] # StreamHandler log level
f_level = level[1] # FileHandler log level
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
# Import the common log functions for convenient
import_log_funcs()
|
Configure the global logger.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L192-L212
|
[
"def add_streamhandler(level, fmt):\n \"\"\"Add a stream handler to the global logger.\"\"\"\n return add_handler(logging.StreamHandler, level, fmt, True)\n",
"def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):\n \"\"\"Add a file handler to the global logger.\"\"\"\n kwargs = {}\n\n # If the filename is not set, use the default filename\n if filename is None:\n filename = getattr(sys.modules['__main__'], '__file__', 'log.py')\n filename = os.path.basename(filename.replace('.py', '.log'))\n filename = os.path.join('/tmp', filename)\n\n if not os.path.exists(os.path.dirname(filename)):\n os.mkdir(os.path.dirname(filename))\n\n kwargs['filename'] = filename\n\n # Choose the filehandler based on the passed arguments\n if backup_count == 0: # Use FileHandler\n cls = logging.FileHandler\n kwargs['mode'] = mode\n elif when is None: # Use RotatingFileHandler\n cls = logging.handlers.RotatingFileHandler\n kwargs['maxBytes'] = limit\n kwargs['backupCount'] = backup_count\n kwargs['mode'] = mode\n else: # Use TimedRotatingFileHandler\n cls = logging.handlers.TimedRotatingFileHandler\n kwargs['when'] = when\n kwargs['interval'] = limit\n kwargs['backupCount'] = backup_count\n\n return add_handler(cls, level, fmt, False, **kwargs)\n",
"def init_logger(name=None):\n \"\"\"Reload the global logger.\"\"\"\n global g_logger\n\n if g_logger is None:\n g_logger = logging.getLogger(name=name)\n else:\n logging.shutdown()\n g_logger.handlers = []\n\n g_logger.setLevel(logging.DEBUG)\n",
"def import_log_funcs():\n \"\"\"Import the common log functions from the global logger to the module.\"\"\"\n global g_logger\n\n curr_mod = sys.modules[__name__]\n\n for func_name in _logging_funcs:\n func = getattr(g_logger, func_name)\n setattr(curr_mod, func_name, func)\n"
] |
# coding: utf-8
"""
pyextend.core.log
~~~~~~~~~~~~~~~
Implements a simple log library.
This module is a simple encapsulation of logging module to provide a more
convenient interface to write log. The log will both print to stdout and
write to log file. It provides a more flexible way to set the log actions,
and also very simple. See examples showed below:
Example 1: Use default settings
import log
log.debug('hello, world')
log.info('hello, world')
log.error('hello, world')
log.critical('hello, world')
Result:
Print all log messages to file, and only print log whose level is greater
than ERROR to stdout. The log file is located in '/tmp/xxx.log' if the module
name is xxx.py. The default log file handler is size-rotated, if the log
file's size is greater than 20M, then it will be rotated.
Example 2: Use set_logger to change settings
# Change limit size in bytes of default rotating action
log.set_logger(limit = 10240) # 10M
# Use time-rotated file handler, each day has a different log file, see
# logging.handlers.TimedRotatingFileHandler for more help about 'when'
log.set_logger(when = 'D', limit = 1)
# Use normal file handler (not rotated)
log.set_logger(backup_count = 0)
# File log level set to INFO, and stdout log level set to DEBUG
log.set_logger(level = 'DEBUG:INFO')
# Both log level set to INFO
log.set_logger(level = 'INFO')
# Change default log file name and log mode
log.set_logger(filename = 'yyy.log', mode = 'w')
# Change default log formatter
log.set_logger(fmt = '[%(levelname)s] %(message)s'
Notice: Default logger has non-filehandler, if you need log into file, please call:
log.set_logger(filename='filename.log', with_filehandler=True)
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import os
import sys
import logging
import logging.handlers
_logging_funcs = ['debug', 'info', 'warning', 'error', 'critical', 'exception']
__all__ = ['set_logger', 'disable'] + _logging_funcs
# logging levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# Color escape string
COLOR_RED = '\033[1;31m'
COLOR_GREEN = '\033[1;32m'
COLOR_YELLOW = '\033[1;33m'
COLOR_BLUE = '\033[1;34m'
COLOR_PURPLE = '\033[1;35m'
COLOR_CYAN = '\033[1;36m'
COLOR_GRAY = '\033[1;37m'
COLOR_WHITE = '\033[1;38m'
COLOR_RESET = '\033[1;0m' # '\033[1;0m'
# Define log color
LOG_COLORS = {
'DEBUG': COLOR_GRAY + '%s' + COLOR_RESET, # '%s',
'INFO': COLOR_GREEN + '%s' + COLOR_RESET,
'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET,
'ERROR': COLOR_RED + '%s' + COLOR_RESET,
'CRITICAL': COLOR_PURPLE + '%s' + COLOR_RESET,
'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET,
}
# Global logger
g_logger = None
class ColoredFormatter(logging.Formatter):
"""A colorful formatter."""
def __init__(self, fmt=None, datefmt=None):
logging.Formatter.__init__(self, fmt, datefmt)
def format(self, record):
level_name = record.levelname
msg = logging.Formatter.format(self, record)
msg = LOG_COLORS.get(level_name, '%s') % msg
return msg
def add_handler(cls, level, fmt, colorful, **kwargs):
"""Add a configured handler to the global logger."""
global g_logger
if isinstance(level, str):
level = getattr(logging, level.upper(), logging.DEBUG)
handler = cls(**kwargs)
handler.setLevel(level)
if colorful:
formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
else:
formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
g_logger.addHandler(handler)
return handler
def add_streamhandler(level, fmt):
"""Add a stream handler to the global logger."""
return add_handler(logging.StreamHandler, level, fmt, True)
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):
"""Add a file handler to the global logger."""
kwargs = {}
# If the filename is not set, use the default filename
if filename is None:
filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
filename = os.path.basename(filename.replace('.py', '.log'))
filename = os.path.join('/tmp', filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
kwargs['filename'] = filename
# Choose the filehandler based on the passed arguments
if backup_count == 0: # Use FileHandler
cls = logging.FileHandler
kwargs['mode'] = mode
elif when is None: # Use RotatingFileHandler
cls = logging.handlers.RotatingFileHandler
kwargs['maxBytes'] = limit
kwargs['backupCount'] = backup_count
kwargs['mode'] = mode
else: # Use TimedRotatingFileHandler
cls = logging.handlers.TimedRotatingFileHandler
kwargs['when'] = when
kwargs['interval'] = limit
kwargs['backupCount'] = backup_count
return add_handler(cls, level, fmt, False, **kwargs)
def init_logger(name=None):
"""Reload the global logger."""
global g_logger
if g_logger is None:
g_logger = logging.getLogger(name=name)
else:
logging.shutdown()
g_logger.handlers = []
g_logger.setLevel(logging.DEBUG)
def disable(level):
"""Disable all logging calls of severity 'level' and below."""
logging.disable(level)
def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
fmt=
'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
# fmt='[%(levelname)s] %(asctime)s %(message)s',
backup_count=5, limit=20480, when=None, with_filehandler=True):
"""Configure the global logger."""
level = level.split(':')
if len(level) == 1: # Both set to the same level
s_level = f_level = level[0]
else:
s_level = level[0] # StreamHandler log level
f_level = level[1] # FileHandler log level
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
# Import the common log functions for convenient
import_log_funcs()
def import_log_funcs():
"""Import the common log functions from the global logger to the module."""
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func)
# Set a default logger
set_logger(with_filehandler=False)
|
Vito2015/pyextend
|
pyextend/core/log.py
|
import_log_funcs
|
python
|
def import_log_funcs():
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func)
|
Import the common log functions from the global logger to the module.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/log.py#L215-L223
| null |
# coding: utf-8
"""
pyextend.core.log
~~~~~~~~~~~~~~~
Implements a simple log library.
This module is a simple encapsulation of logging module to provide a more
convenient interface to write log. The log will both print to stdout and
write to log file. It provides a more flexible way to set the log actions,
and also very simple. See examples showed below:
Example 1: Use default settings
import log
log.debug('hello, world')
log.info('hello, world')
log.error('hello, world')
log.critical('hello, world')
Result:
Print all log messages to file, and only print log whose level is greater
than ERROR to stdout. The log file is located in '/tmp/xxx.log' if the module
name is xxx.py. The default log file handler is size-rotated, if the log
file's size is greater than 20M, then it will be rotated.
Example 2: Use set_logger to change settings
# Change limit size in bytes of default rotating action
log.set_logger(limit = 10240) # 10M
# Use time-rotated file handler, each day has a different log file, see
# logging.handlers.TimedRotatingFileHandler for more help about 'when'
log.set_logger(when = 'D', limit = 1)
# Use normal file handler (not rotated)
log.set_logger(backup_count = 0)
# File log level set to INFO, and stdout log level set to DEBUG
log.set_logger(level = 'DEBUG:INFO')
# Both log level set to INFO
log.set_logger(level = 'INFO')
# Change default log file name and log mode
log.set_logger(filename = 'yyy.log', mode = 'w')
# Change default log formatter
log.set_logger(fmt = '[%(levelname)s] %(message)s'
Notice: Default logger has non-filehandler, if you need log into file, please call:
log.set_logger(filename='filename.log', with_filehandler=True)
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import os
import sys
import logging
import logging.handlers
_logging_funcs = ['debug', 'info', 'warning', 'error', 'critical', 'exception']
__all__ = ['set_logger', 'disable'] + _logging_funcs
# logging levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# Color escape string
COLOR_RED = '\033[1;31m'
COLOR_GREEN = '\033[1;32m'
COLOR_YELLOW = '\033[1;33m'
COLOR_BLUE = '\033[1;34m'
COLOR_PURPLE = '\033[1;35m'
COLOR_CYAN = '\033[1;36m'
COLOR_GRAY = '\033[1;37m'
COLOR_WHITE = '\033[1;38m'
COLOR_RESET = '\033[1;0m' # '\033[1;0m'
# Define log color
LOG_COLORS = {
'DEBUG': COLOR_GRAY + '%s' + COLOR_RESET, # '%s',
'INFO': COLOR_GREEN + '%s' + COLOR_RESET,
'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET,
'ERROR': COLOR_RED + '%s' + COLOR_RESET,
'CRITICAL': COLOR_PURPLE + '%s' + COLOR_RESET,
'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET,
}
# Global logger
g_logger = None
class ColoredFormatter(logging.Formatter):
"""A colorful formatter."""
def __init__(self, fmt=None, datefmt=None):
logging.Formatter.__init__(self, fmt, datefmt)
def format(self, record):
level_name = record.levelname
msg = logging.Formatter.format(self, record)
msg = LOG_COLORS.get(level_name, '%s') % msg
return msg
def add_handler(cls, level, fmt, colorful, **kwargs):
"""Add a configured handler to the global logger."""
global g_logger
if isinstance(level, str):
level = getattr(logging, level.upper(), logging.DEBUG)
handler = cls(**kwargs)
handler.setLevel(level)
if colorful:
formatter = ColoredFormatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
else:
formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
g_logger.addHandler(handler)
return handler
def add_streamhandler(level, fmt):
"""Add a stream handler to the global logger."""
return add_handler(logging.StreamHandler, level, fmt, True)
def add_filehandler(level, fmt, filename, mode, backup_count, limit, when):
"""Add a file handler to the global logger."""
kwargs = {}
# If the filename is not set, use the default filename
if filename is None:
filename = getattr(sys.modules['__main__'], '__file__', 'log.py')
filename = os.path.basename(filename.replace('.py', '.log'))
filename = os.path.join('/tmp', filename)
if not os.path.exists(os.path.dirname(filename)):
os.mkdir(os.path.dirname(filename))
kwargs['filename'] = filename
# Choose the filehandler based on the passed arguments
if backup_count == 0: # Use FileHandler
cls = logging.FileHandler
kwargs['mode'] = mode
elif when is None: # Use RotatingFileHandler
cls = logging.handlers.RotatingFileHandler
kwargs['maxBytes'] = limit
kwargs['backupCount'] = backup_count
kwargs['mode'] = mode
else: # Use TimedRotatingFileHandler
cls = logging.handlers.TimedRotatingFileHandler
kwargs['when'] = when
kwargs['interval'] = limit
kwargs['backupCount'] = backup_count
return add_handler(cls, level, fmt, False, **kwargs)
def init_logger(name=None):
"""Reload the global logger."""
global g_logger
if g_logger is None:
g_logger = logging.getLogger(name=name)
else:
logging.shutdown()
g_logger.handlers = []
g_logger.setLevel(logging.DEBUG)
def disable(level):
"""Disable all logging calls of severity 'level' and below."""
logging.disable(level)
def set_logger(name=None, filename=None, mode='a', level='NOTSET:NOTSET',
fmt=
'%(asctime)s %(filename)s:%(lineno)d [PID:%(process)-5d THD:%(thread)-5d %(levelname)-7s] %(message)s',
# fmt='[%(levelname)s] %(asctime)s %(message)s',
backup_count=5, limit=20480, when=None, with_filehandler=True):
"""Configure the global logger."""
level = level.split(':')
if len(level) == 1: # Both set to the same level
s_level = f_level = level[0]
else:
s_level = level[0] # StreamHandler log level
f_level = level[1] # FileHandler log level
init_logger(name=name)
add_streamhandler(s_level, fmt)
if with_filehandler:
add_filehandler(f_level, fmt, filename, mode, backup_count, limit, when)
# Import the common log functions for convenient
import_log_funcs()
def import_log_funcs():
"""Import the common log functions from the global logger to the module."""
global g_logger
curr_mod = sys.modules[__name__]
for func_name in _logging_funcs:
func = getattr(g_logger, func_name)
setattr(curr_mod, func_name, func)
# Set a default logger
set_logger(with_filehandler=False)
|
Vito2015/pyextend
|
pyextend/network/regex.py
|
email_match
|
python
|
def email_match(string):
    """Match *string* against the module-level ``email_pattern``.

    Returns ``(email_name, email_server)`` on success, ``None`` otherwise.
    """
    match = re.match(email_pattern, string)
    if match is None:
        return None
    groups = match.groups()
    # Group 1 is the mailbox name, group 3 the server domain.
    return groups[0], groups[2]
|
邮箱地址匹配. 匹配成功返回(email_name, email_server), 否则返回None
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/network/regex.py#L23-L31
| null |
# coding: utf-8
"""
pyextend.network.regex
~~~~~~~~~~~~~~~~~~~~
pyextend network regex
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
__all__ = ['email_match', 'email_pattern']
import re
# 表达式,如果匹配成功 这返回Match对象,否则返回None
# bill.gates@microsoft.com
# 当匹配成功时,在第1,3组中分别存放着
# Email的名字 如 bill.gates,
# Email服务器 如 microsoft.com
email_pattern = r'(\w+([-.]\w+)*)@(\w+([-.]\w+)*\.\w+([-.]\w+)*)'
if __name__ == '__main__':
from pyextend.core import log
L = ['someone@gmail.com',
'bill.gates@microsoft.com',
'bill.ga.tes@microsoft.com',
'bill.gates@micro.soft.com',
'bill.gates@micro..soft.com',
'bill..gates@microsoft.com',
]
for email in L:
res = email_match(email)
if res:
log.debug('Match success: %s' % email)
log.info('EmailName: %s, EmailServer: %s' % (res[0], res[1]))
else:
log.error('Match failed: %s' % email)
log.info('Done.')
|
Vito2015/pyextend
|
pyextend/formula/geo/geohash.py
|
decode
|
python
|
def decode(hashcode, delta=False):
    """Decode *hashcode* to the (latitude, longitude) of its cell centre.

    With ``delta=True`` also return the half-width of the cell in each
    dimension, i.e. the distance from the centre to the outer border.
    """
    lat, lon, lat_length, lon_length = _decode_c2i(hashcode)
    # Exact path: float.fromhex lets the coordinate be rebuilt from its
    # bit pattern without rounding drift.
    if hasattr(float, "fromhex"):
        latitude_delta = 90.0/(1 << lat_length)
        longitude_delta = 180.0/(1 << lon_length)
        # _int_to_float_hex maps the integer to a fraction in [-1, 1);
        # adding one delta moves from the cell border to its centre.
        latitude = _int_to_float_hex(lat, lat_length) * 90.0 + latitude_delta
        longitude = _int_to_float_hex(lon, lon_length) * 180.0 + longitude_delta
        if delta:
            return latitude, longitude, latitude_delta, longitude_delta
        return latitude, longitude
    # Fallback (no float.fromhex): append one extra low bit so the integer
    # denotes the cell centre instead of its lower border.
    lat = (lat << 1) + 1
    lon = (lon << 1) + 1
    lat_length += 1
    lon_length += 1
    latitude = 180.0*(lat-(1 << (lat_length-1)))/(1 << lat_length)
    longitude = 360.0*(lon-(1 << (lon_length-1)))/(1 << lon_length)
    if delta:
        latitude_delta = 180.0/(1 << lat_length)
        longitude_delta = 360.0/(1 << lon_length)
        return latitude, longitude, latitude_delta, longitude_delta
    return latitude, longitude
|
decode a hashcode and get center coordinate, and distance between center and outer border
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/formula/geo/geohash.py#L152-L180
|
[
"def _int_to_float_hex(i, l):\n if l == 0:\n return -1.0\n\n half = 1 << (l-1)\n s = int((l+3)/4)\n if i >= half:\n i -= half\n return float.fromhex((\"0x0.%0\"+str(s)+\"xp1\") % (i << (s*4-l),))\n else:\n i = half-i\n return float.fromhex((\"-0x0.%0\"+str(s)+\"xp1\") % (i << (s*4-l),))\n",
"def _decode_c2i(hashcode):\n lon = 0\n lat = 0\n bit_length = 0\n lat_length = 0\n lon_length = 0\n for i in hashcode:\n t = _base32_map[i]\n if bit_length % 2 == 0:\n lon <<= 3\n lat <<= 2\n lon += (t >> 2) & 4\n lat += (t >> 2) & 2\n lon += (t >> 1) & 2\n lat += (t >> 1) & 1\n lon += t & 1\n lon_length += 3\n lat_length += 2\n else:\n lon <<= 2\n lat <<= 3\n lat += (t >> 2) & 4\n lon += (t >> 2) & 2\n lat += (t >> 1) & 2\n lon += (t >> 1) & 1\n lat += t & 1\n lon_length += 2\n lat_length += 3\n\n bit_length += 5\n\n return lat, lon, lat_length, lon_length\n"
] |
#!/usr/bin/env python
# coding: utf-8
__all__ = ['encode', 'decode', 'decode_exactly', 'bbox', 'neighbors', 'expand']
_base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
_base32_map = {}
for i in range(len(_base32)):
_base32_map[_base32[i]] = i
del i
LONG_ZERO = 0
import sys
if sys.version_info[0] < 3:
LONG_ZERO = long(0)
def _float_hex_to_int(f):
    """Convert a float in [-1.0, 1.0) to an exact (integer, bit_length) pair.

    The float's hex representation is parsed so no precision is lost.
    Returns None for out-of-range input and (1, 1) for 0.0.
    """
    if f < -1.0 or f >= 1.0:
        return None
    if f == 0.0:
        return 1, 1
    h = f.hex()
    # h looks like '[-]0x1.<mantissa>p<exp>'; x == 0 iff f is positive.
    x = h.find("0x1.")
    assert(x >= 0)
    p = h.find("p")
    assert(p > 0)
    # Significant fraction bits after accounting for the binary exponent.
    half_len = len(h[x+4:p])*4-int(h[p+1:])
    if x == 0:
        # Positive value: offset above the range midpoint.
        r = (1 << half_len) + ((1 << (len(h[x+4:p])*4)) + int(h[x+4:p], 16))
    else:
        # Negative value ('-0x1.'): offset below the range midpoint.
        r = (1 << half_len) - ((1 << (len(h[x+4:p])*4)) + int(h[x+4:p], 16))
    return r, half_len+1
def _int_to_float_hex(i, l):
    """Inverse of _float_hex_to_int: map integer *i* with bit length *l*
    back to a float in [-1.0, 1.0) via an exact hex-float literal."""
    if l == 0:
        return -1.0
    # Midpoint of the l-bit range separates positive from negative values.
    half = 1 << (l-1)
    # Hex digits needed to hold l bits.
    s = int((l+3)/4)
    if i >= half:
        i -= half
        return float.fromhex(("0x0.%0"+str(s)+"xp1") % (i << (s*4-l),))
    else:
        i = half-i
        return float.fromhex(("-0x0.%0"+str(s)+"xp1") % (i << (s*4-l),))
def _encode_i2c(lat, lon, lat_length, lon_length):
    """Interleave integer latitude/longitude bits into a base32 geohash
    string of (lat_length + lon_length) / 5 characters."""
    precision = int((lat_length+lon_length)/5)
    # The dimension holding more bits supplies 3 bits of each 5-bit group;
    # the roles swap on every character.
    if lat_length < lon_length:
        a = lon
        b = lat
    else:
        a = lat
        b = lon
    # boost[n] spreads the 3 low bits of n onto bit positions 0/2/4.
    boost = (0, 1, 4, 5, 16, 17, 20, 21)
    ret = ''
    for i in range(precision):
        # Merge 3 bits of `a` (even positions) with 2 bits of `b` (odd).
        ret += _base32[(boost[a & 7]+(boost[b & 3] << 1)) & 0x1F]
        # Consume the used bits and swap which dimension leads.
        t = a >> 3
        a = b >> 2
        b = t
    # Characters were produced least-significant first; reverse them.
    return ret[::-1]
def encode(latitude, longitude, precision=12):
    """Encode a coordinate as a geohash string of *precision* characters.

    Raises for latitude outside [-90, 90); longitude is normalised into
    [-180, 180) by wrapping whole turns.
    """
    if latitude >= 90.0 or latitude < -90.0:
        raise Exception("invalid latitude.")
    while longitude < -180.0:
        longitude += 360.0
    while longitude >= 180.0:
        longitude -= 360.0
    # Work at one extra character of precision, then truncate the result.
    xprecision = precision+1
    lat_length = lon_length = int(xprecision*5/2)
    if xprecision % 2 == 1:
        lon_length += 1
    # Exact path: convert the fractions via their hex bit patterns.
    if hasattr(float, "fromhex"):
        a = _float_hex_to_int(latitude/90.0)
        o = _float_hex_to_int(longitude/180.0)
        # Align each (value, bit_length) pair to the target bit length.
        if a[1] > lat_length:
            ai = a[0] >> (a[1]-lat_length)
        else:
            ai = a[0] << (lat_length-a[1])
        if o[1] > lon_length:
            oi = o[0] >> (o[1]-lon_length)
        else:
            oi = o[0] << (lon_length-o[1])
        return _encode_i2c(ai, oi, lat_length, lon_length)[:precision]
    # Fallback: scale the fractions with plain arithmetic (less exact).
    lat = latitude/180.0
    lon = longitude/360.0
    if lat > 0:
        lat = int((1 << lat_length)*lat)+(1 << (lat_length-1))
    else:
        lat = (1 << lat_length-1)-int((1 << lat_length)*(-lat))
    if lon > 0:
        lon = int((1 << lon_length)*lon)+(1 << (lon_length-1))
    else:
        lon = (1 << lon_length-1)-int((1 << lon_length)*(-lon))
    return _encode_i2c(lat, lon, lat_length, lon_length)[:precision]
def _decode_c2i(hashcode):
    """Decode a geohash string into raw interleaved integers.

    Returns (lat, lon, lat_length, lon_length), the lengths being the
    number of bits accumulated for each dimension.
    """
    lon = 0
    lat = 0
    bit_length = 0
    lat_length = 0
    lon_length = 0
    for i in hashcode:
        t = _base32_map[i]
        # Each base32 character carries 5 bits; even-indexed characters
        # contribute 3 longitude / 2 latitude bits, odd-indexed ones the
        # reverse (bit_length grows by 5 per character, so its parity
        # alternates).
        if bit_length % 2 == 0:
            lon <<= 3
            lat <<= 2
            lon += (t >> 2) & 4
            lat += (t >> 2) & 2
            lon += (t >> 1) & 2
            lat += (t >> 1) & 1
            lon += t & 1
            lon_length += 3
            lat_length += 2
        else:
            lon <<= 2
            lat <<= 3
            lat += (t >> 2) & 4
            lon += (t >> 2) & 2
            lat += (t >> 1) & 2
            lon += (t >> 1) & 1
            lat += t & 1
            lon_length += 2
            lat_length += 3
        bit_length += 5
    return lat, lon, lat_length, lon_length
def decode_exactly(hashcode):
    """Decode *hashcode* to (latitude, longitude, lat_delta, lon_delta)."""
    return decode(hashcode, delta=True)
# hashcode operations below
def bbox(hashcode):
    """
    Decode a hashcode and return its bounding box as a dict with the
    north ('n'), south ('s'), east ('e') and west ('w') borders in degrees.
    """
    lat, lon, lat_length, lon_length = _decode_c2i(hashcode)
    if hasattr(float, "fromhex"):
        # Cell width in each dimension.
        latitude_delta = 180.0/(1 << lat_length)
        longitude_delta = 360.0/(1 << lon_length)
        # Exact south/west border from the bit pattern; adding one cell
        # width yields the opposite border.
        latitude = _int_to_float_hex(lat, lat_length) * 90.0
        longitude = _int_to_float_hex(lon, lon_length) * 180.0
        return {"s": latitude, "w": longitude, "n": latitude+latitude_delta, "e": longitude+longitude_delta}
    ret = {}
    if lat_length:
        ret['n'] = 180.0*(lat+1-(1 << (lat_length-1)))/(1 << lat_length)
        ret['s'] = 180.0*(lat-(1 << (lat_length-1)))/(1 << lat_length)
    else:  # can't calculate the half with bit shifts (negative shift)
        ret['n'] = 90.0
        ret['s'] = -90.0
    if lon_length:
        ret['e'] = 360.0*(lon+1-(1 << (lon_length-1)))/(1 << lon_length)
        ret['w'] = 360.0*(lon-(1 << (lon_length-1)))/(1 << lon_length)
    else:  # can't calculate the half with bit shifts (negative shift)
        ret['e'] = 180.0
        ret['w'] = -180.0
    return ret
def neighbors(hashcode):
    """Return the geohashes of the cells adjacent to *hashcode*.

    Rows past the northern/southern edge are skipped, so the result
    holds up to 8 entries.
    """
    lat, lon, lat_length, lon_length = _decode_c2i(hashcode)
    ret = []
    # Same row: west and east neighbours.
    tlat = lat
    for tlon in (lon-1, lon+1):
        code = _encode_i2c(tlat, tlon, lat_length, lon_length)
        if code:
            ret.append(code)
    # Row above, unless it would overflow past the northern edge.
    tlat = lat+1
    if not tlat >> lat_length:
        for tlon in (lon-1, lon, lon+1):
            ret.append(_encode_i2c(tlat, tlon, lat_length, lon_length))
    # Row below, unless already at the southernmost row.
    tlat = lat-1
    if tlat >= 0:
        for tlon in (lon-1, lon, lon+1):
            ret.append(_encode_i2c(tlat, tlon, lat_length, lon_length))
    return ret
def expand(hashcode):
    """Return *hashcode* together with all of its neighbouring cells."""
    return neighbors(hashcode) + [hashcode]
def _uint64_interleave(lat32, lon32):
intr = 0
boost = (0, 1, 4, 5, 16, 17, 20, 21, 64, 65, 68, 69, 80, 81, 84, 85)
for i in range(8):
intr = (intr << 8) + (boost[(lon32 >> (28-i*4)) % 16] << 1) + boost[(lat32 >> (28-i*4)) % 16]
return intr
def _uint64_deinterleave(ui64):
lat = lon = 0
boost = ((0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (0, 3), (1, 2), (1, 3),
(2, 0), (2, 1), (3, 0), (3, 1), (2, 2), (2, 3), (3, 2), (3, 3))
for i in range(16):
p = boost[(ui64 >> (60-i*4)) % 16]
lon = (lon << 2) + p[0]
lat = (lat << 2) + p[1]
return lat, lon
def encode_uint64(latitude, longitude):
    """Encode a coordinate as a 64-bit integer geohash.

    Raises ValueError for latitude outside [-90, 90); longitude is
    normalised into [-180, 180) by wrapping whole turns.
    """
    if latitude >= 90.0 or latitude < -90.0:
        raise ValueError("Latitude must be in the range of (-90.0, 90.0)")
    while longitude < -180.0:
        longitude += 360.0
    while longitude >= 180.0:
        longitude -= 360.0
    # Scale each coordinate to an unsigned 32-bit fraction of its range.
    lat32 = int(((latitude + 90.0)/180.0)*(1 << 32))
    lon32 = int(((longitude+180.0)/360.0)*(1 << 32))
    return _uint64_interleave(lat32, lon32)
def decode_uint64(ui64):
    """Decode a 64-bit interleaved geohash into (latitude, longitude)."""
    lat_i, lon_i = _uint64_deinterleave(ui64)
    latitude = 180.0 * lat_i / (1 << 32) - 90.0
    longitude = 360.0 * lon_i / (1 << 32) - 180.0
    return latitude, longitude
def expand_uint64(ui64, precision=50):
ui64 &= (0xFFFFFFFFFFFFFFFF << (64-precision))
lat, lon = _uint64_deinterleave(ui64)
lat_grid = 1 << (32-int(precision/2))
lon_grid = lat_grid >> (precision % 2)
if precision <= 2: # expand becomes to the whole range
return []
ranges = []
if lat & lat_grid:
if lon & lon_grid:
ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision+2))))
if precision % 2 == 0:
# lat, lon = (1, 1) and even precision
ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision+1))))
if lat + lat_grid < 0xFFFFFFFF:
ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat+lat_grid, lon)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat+lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
else:
# lat,lon = (1, 1) and odd precision
if lat + lat_grid < 0xFFFFFFFF:
ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision+1))))
ui64 = _uint64_interleave(lat+lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
else:
ui64 = _uint64_interleave(lat-lat_grid, lon)
ranges.append((ui64, ui64 + (1 << (64-precision+2))))
if precision % 2 == 0:
# lat,lon = (1, 0) and odd precision
ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision+1))))
if lat + lat_grid < 0xFFFFFFFF:
ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat+lat_grid, lon)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat+lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
else:
# lat,lon = (1, 0) and odd precision
if lat + lat_grid < 0xFFFFFFFF:
ui64 = _uint64_interleave(lat+lat_grid, lon)
ranges.append((ui64, ui64 + (1 << (64-precision+1))))
ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
else:
if lon & lon_grid:
ui64 = _uint64_interleave(lat, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision+2))))
if precision % 2 == 0:
# lat,lon = (0, 1) and even precision
ui64 = _uint64_interleave(lat, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision+1))))
if lat > 0:
ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat-lat_grid, lon)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
else:
# lat,lon = (0, 1) and odd precision
if lat > 0:
ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision+1))))
ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat+lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
else:
ui64 = _uint64_interleave(lat, lon)
ranges.append((ui64, ui64 + (1 << (64-precision+2))))
if precision % 2 == 0:
# lat,lon = (0, 0) and even precision
ui64 = _uint64_interleave(lat, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision+1))))
if lat > 0:
ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat-lat_grid, lon)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
else:
# lat,lon = (0, 0) and odd precision
if lat > 0:
ui64 = _uint64_interleave(lat-lat_grid, lon)
ranges.append((ui64, ui64 + (1 << (64-precision+1))))
ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
ranges.append((ui64, ui64 + (1 << (64-precision))))
ranges.sort()
# merge the conditions
shrink = []
prev = None
for i in ranges:
if prev:
if prev[1] != i[0]:
shrink.append(prev)
prev = i
else:
prev = (prev[0], i[1])
else:
prev = i
shrink.append(prev)
ranges = []
for i in shrink:
a, b = i
if a == 0:
a = None # we can remove the condition because it is the lowest value
if b == 0x10000000000000000:
b = None # we can remove the condition because it is the highest value
ranges.append((a, b))
return ranges
|
Vito2015/pyextend
|
pyextend/formula/geo/geohash.py
|
bbox
|
python
|
def bbox(hashcode):
    """Return the bounding box of *hashcode* as a dict with the 'n', 's',
    'e' and 'w' border latitudes/longitudes."""
    lat, lon, lat_length, lon_length = _decode_c2i(hashcode)
    if hasattr(float, "fromhex"):
        # Exact path: derive the south-west corner from the hex-float form
        # and add one cell width/height for the north-east corner.
        latitude_delta = 180.0/(1 << lat_length)
        longitude_delta = 360.0/(1 << lon_length)
        latitude = _int_to_float_hex(lat, lat_length) * 90.0
        longitude = _int_to_float_hex(lon, lon_length) * 180.0
        return {"s": latitude, "w": longitude, "n": latitude+latitude_delta, "e": longitude+longitude_delta}
    # Fallback for interpreters without float.fromhex.
    ret = {}
    if lat_length:
        ret['n'] = 180.0*(lat+1-(1 << (lat_length-1)))/(1 << lat_length)
        ret['s'] = 180.0*(lat-(1 << (lat_length-1)))/(1 << lat_length)
    else:  # zero latitude bits: the cell spans the whole latitude range
        ret['n'] = 90.0
        ret['s'] = -90.0
    if lon_length:
        ret['e'] = 360.0*(lon+1-(1 << (lon_length-1)))/(1 << lon_length)
        ret['w'] = 360.0*(lon-(1 << (lon_length-1)))/(1 << lon_length)
    else:  # zero longitude bits: the cell spans the whole longitude range
        ret['e'] = 180.0
        ret['w'] = -180.0
    return ret
|
decode a hashcode and get north, south, east and west border.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/formula/geo/geohash.py#L189-L217
|
[
"def _int_to_float_hex(i, l):\n if l == 0:\n return -1.0\n\n half = 1 << (l-1)\n s = int((l+3)/4)\n if i >= half:\n i -= half\n return float.fromhex((\"0x0.%0\"+str(s)+\"xp1\") % (i << (s*4-l),))\n else:\n i = half-i\n return float.fromhex((\"-0x0.%0\"+str(s)+\"xp1\") % (i << (s*4-l),))\n",
"def _decode_c2i(hashcode):\n lon = 0\n lat = 0\n bit_length = 0\n lat_length = 0\n lon_length = 0\n for i in hashcode:\n t = _base32_map[i]\n if bit_length % 2 == 0:\n lon <<= 3\n lat <<= 2\n lon += (t >> 2) & 4\n lat += (t >> 2) & 2\n lon += (t >> 1) & 2\n lat += (t >> 1) & 1\n lon += t & 1\n lon_length += 3\n lat_length += 2\n else:\n lon <<= 2\n lat <<= 3\n lat += (t >> 2) & 4\n lon += (t >> 2) & 2\n lat += (t >> 1) & 2\n lon += (t >> 1) & 1\n lat += t & 1\n lon_length += 2\n lat_length += 3\n\n bit_length += 5\n\n return lat, lon, lat_length, lon_length\n"
] |
#!/usr/bin/env python
# coding: utf-8
__all__ = ['encode', 'decode', 'decode_exactly', 'bbox', 'neighbors', 'expand']
_base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
_base32_map = {}
for i in range(len(_base32)):
_base32_map[_base32[i]] = i
del i
LONG_ZERO = 0
import sys
if sys.version_info[0] < 3:
LONG_ZERO = long(0)
def _float_hex_to_int(f):
    # Convert a float in [-1.0, 1.0) into an exact (integer, bit_length) pair
    # by parsing float.hex().  Returns None for out-of-range input and (1, 1)
    # for exactly 0.0.
    if f < -1.0 or f >= 1.0:
        return None
    if f == 0.0:
        return 1, 1
    h = f.hex()
    # x == 0 means no leading '-': the float is positive.
    x = h.find("0x1.")
    assert(x >= 0)
    p = h.find("p")
    assert(p > 0)
    # Number of fraction bits (4 per hex digit) minus the binary exponent.
    half_len = len(h[x+4:p])*4-int(h[p+1:])
    if x == 0:
        # Positive: offset above the midpoint of the (half_len+1)-bit range.
        r = (1 << half_len) + ((1 << (len(h[x+4:p])*4)) + int(h[x+4:p], 16))
    else:
        # Negative: offset below the midpoint.
        r = (1 << half_len) - ((1 << (len(h[x+4:p])*4)) + int(h[x+4:p], 16))
    return r, half_len+1
def _int_to_float_hex(i, l):
    # Inverse of _float_hex_to_int: map an l-bit integer cell index back to a
    # float in [-1.0, 1.0) via float.fromhex().  l == 0 degenerates to -1.0.
    if l == 0:
        return -1.0
    half = 1 << (l-1)  # midpoint of the l-bit range (sign boundary)
    s = int((l+3)/4)   # hex digits needed to hold l bits
    if i >= half:
        # Upper half of the range maps to the non-negative floats.
        i -= half
        return float.fromhex(("0x0.%0"+str(s)+"xp1") % (i << (s*4-l),))
    else:
        # Lower half maps to the negative floats.
        i = half-i
        return float.fromhex(("-0x0.%0"+str(s)+"xp1") % (i << (s*4-l),))
def _encode_i2c(lat, lon, lat_length, lon_length):
    """Pack integer lat/lon cell indices into a base32 geohash string.

    Consumes 5 bits per character, alternating which axis contributes
    3 bits; the character for the least significant bits is built first,
    so the accumulated string is reversed at the end.
    """
    length = (lat_length + lon_length) // 5
    if lat_length < lon_length:
        a, b = lon, lat
    else:
        a, b = lat, lon
    spread = (0, 1, 4, 5, 16, 17, 20, 21)
    chars = []
    for _ in range(length):
        chars.append(_base32[(spread[a & 7] + (spread[b & 3] << 1)) & 0x1F])
        a, b = b >> 2, a >> 3
    return ''.join(reversed(chars))
def encode(latitude, longitude, precision=12):
    """Encode a (latitude, longitude) pair into a geohash of *precision* chars.

    Longitude is normalized into [-180.0, 180.0); latitude must lie in
    [-90.0, 90.0) or an Exception is raised.
    """
    if latitude >= 90.0 or latitude < -90.0:
        raise Exception("invalid latitude.")
    while longitude < -180.0:
        longitude += 360.0
    while longitude >= 180.0:
        longitude -= 360.0
    # Work at one extra character of resolution; truncate at the end.
    xprecision = precision+1
    lat_length = lon_length = int(xprecision*5/2)
    if xprecision % 2 == 1:
        lon_length += 1
    if hasattr(float, "fromhex"):
        # Exact path: integer cell indices derived from the hex-float form,
        # then shifted to the requested bit lengths.
        a = _float_hex_to_int(latitude/90.0)
        o = _float_hex_to_int(longitude/180.0)
        if a[1] > lat_length:
            ai = a[0] >> (a[1]-lat_length)
        else:
            ai = a[0] << (lat_length-a[1])
        if o[1] > lon_length:
            oi = o[0] >> (o[1]-lon_length)
        else:
            oi = o[0] << (lon_length-o[1])
        return _encode_i2c(ai, oi, lat_length, lon_length)[:precision]
    # Fallback without float.fromhex: scale to the cell grid directly.
    lat = latitude/180.0
    lon = longitude/360.0
    if lat > 0:
        lat = int((1 << lat_length)*lat)+(1 << (lat_length-1))
    else:
        lat = (1 << lat_length-1)-int((1 << lat_length)*(-lat))
    if lon > 0:
        lon = int((1 << lon_length)*lon)+(1 << (lon_length-1))
    else:
        lon = (1 << lon_length-1)-int((1 << lon_length)*(-lon))
    return _encode_i2c(lat, lon, lat_length, lon_length)[:precision]
def _decode_c2i(hashcode):
    """Unpack a base32 geohash string into integer cell indices.

    Returns (lat, lon, lat_length, lon_length) where the *_length values are
    the number of bits accumulated per axis.  Each 5-bit character alternates
    between giving 3 bits to longitude / 2 to latitude and the reverse.
    """
    lon = 0
    lat = 0
    bit_length = 0
    lat_length = 0
    lon_length = 0
    for i in hashcode:
        t = _base32_map[i]
        if bit_length % 2 == 0:
            # Even character: 3 bits to longitude, 2 bits to latitude.
            lon <<= 3
            lat <<= 2
            lon += (t >> 2) & 4
            lat += (t >> 2) & 2
            lon += (t >> 1) & 2
            lat += (t >> 1) & 1
            lon += t & 1
            lon_length += 3
            lat_length += 2
        else:
            # Odd character: 2 bits to longitude, 3 bits to latitude.
            lon <<= 2
            lat <<= 3
            lat += (t >> 2) & 4
            lon += (t >> 2) & 2
            lat += (t >> 1) & 2
            lon += (t >> 1) & 1
            lat += t & 1
            lon_length += 2
            lat_length += 3
        bit_length += 5
    return lat, lon, lat_length, lon_length
def decode(hashcode, delta=False):
    """Decode *hashcode* into the center coordinate of its cell.

    Returns (latitude, longitude); with delta=True also the half-width of
    the cell per axis: (latitude, longitude, latitude_delta, longitude_delta).
    """
    lat, lon, lat_length, lon_length = _decode_c2i(hashcode)
    if hasattr(float, "fromhex"):
        # Exact path: SW corner from the hex-float form plus half a cell.
        latitude_delta = 90.0/(1 << lat_length)
        longitude_delta = 180.0/(1 << lon_length)
        latitude = _int_to_float_hex(lat, lat_length) * 90.0 + latitude_delta
        longitude = _int_to_float_hex(lon, lon_length) * 180.0 + longitude_delta
        if delta:
            return latitude, longitude, latitude_delta, longitude_delta
        return latitude, longitude
    # Fallback: append one extra low bit to land on the cell midpoint.
    lat = (lat << 1) + 1
    lon = (lon << 1) + 1
    lat_length += 1
    lon_length += 1
    latitude = 180.0*(lat-(1 << (lat_length-1)))/(1 << lat_length)
    longitude = 360.0*(lon-(1 << (lon_length-1)))/(1 << lon_length)
    if delta:
        latitude_delta = 180.0/(1 << lat_length)
        longitude_delta = 360.0/(1 << lon_length)
        return latitude, longitude, latitude_delta, longitude_delta
    return latitude, longitude
def decode_exactly(hashcode):
    """Decode *hashcode*, also returning the per-axis half-width deltas."""
    return decode(hashcode, delta=True)
# hashcode operations below
def neighbors(hashcode):
    """Return the geohashes adjacent to *hashcode* (up to 8 neighbors).

    East/west neighbors on the same row are always attempted; the rows to
    the north and south are added only while they stay inside the grid.
    """
    lat, lon, lat_length, lon_length = _decode_c2i(hashcode)
    ret = []
    tlat = lat
    # Same row: west and east neighbors.
    # NOTE(review): lon-1 may go negative at the antimeridian; _encode_i2c is
    # fed the raw value — confirm wrap-around is intended.
    for tlon in (lon-1, lon+1):
        code = _encode_i2c(tlat, tlon, lat_length, lon_length)
        if code:
            ret.append(code)
    # Row to the north, unless already at the top of the latitude range.
    tlat = lat+1
    if not tlat >> lat_length:
        for tlon in (lon-1, lon, lon+1):
            ret.append(_encode_i2c(tlat, tlon, lat_length, lon_length))
    # Row to the south, unless already at the bottom.
    tlat = lat-1
    if tlat >= 0:
        for tlon in (lon-1, lon, lon+1):
            ret.append(_encode_i2c(tlat, tlon, lat_length, lon_length))
    return ret
def expand(hashcode):
    """Return the neighbors of *hashcode* plus *hashcode* itself."""
    return neighbors(hashcode) + [hashcode]
def _uint64_interleave(lat32, lon32):
intr = 0
boost = (0, 1, 4, 5, 16, 17, 20, 21, 64, 65, 68, 69, 80, 81, 84, 85)
for i in range(8):
intr = (intr << 8) + (boost[(lon32 >> (28-i*4)) % 16] << 1) + boost[(lat32 >> (28-i*4)) % 16]
return intr
def _uint64_deinterleave(ui64):
lat = lon = 0
boost = ((0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (0, 3), (1, 2), (1, 3),
(2, 0), (2, 1), (3, 0), (3, 1), (2, 2), (2, 3), (3, 2), (3, 3))
for i in range(16):
p = boost[(ui64 >> (60-i*4)) % 16]
lon = (lon << 2) + p[0]
lat = (lat << 2) + p[1]
return lat, lon
def encode_uint64(latitude, longitude):
    """Encode a coordinate into a 64-bit interleaved integer geohash.

    Latitude must lie in [-90.0, 90.0); longitude is normalized into
    [-180.0, 180.0) first.
    """
    if latitude >= 90.0 or latitude < -90.0:
        # NOTE(review): the accepted range is [-90.0, 90.0) but the message
        # reads as an open interval — confirm which is intended.
        raise ValueError("Latitude must be in the range of (-90.0, 90.0)")
    while longitude < -180.0:
        longitude += 360.0
    while longitude >= 180.0:
        longitude -= 360.0
    # Scale each axis onto a 32-bit integer grid, then bit-interleave them.
    lat = int(((latitude + 90.0)/180.0)*(1 << 32))
    lon = int(((longitude+180.0)/360.0)*(1 << 32))
    return _uint64_interleave(lat, lon)
def decode_uint64(ui64):
    """Decode a 64-bit interleaved geohash into (latitude, longitude)."""
    lat_i, lon_i = _uint64_deinterleave(ui64)
    latitude = 180.0 * lat_i / (1 << 32) - 90.0
    longitude = 360.0 * lon_i / (1 << 32) - 180.0
    return latitude, longitude
def expand_uint64(ui64, precision=50):
    """Return query ranges covering *ui64*'s cell and its neighbors.

    *precision* is the number of significant leading bits of the key.  The
    result is a sorted, merged list of (low, high) uint64 pairs suitable for
    range scans; a bound is replaced by None when it reaches the edge of the
    key space.  The branching below selects, for each quadrant position of
    the cell (lat/lon bit set or clear) and parity of *precision*, which
    neighboring cells can be coalesced into wider ranges.
    """
    ui64 &= (0xFFFFFFFFFFFFFFFF << (64-precision))
    lat, lon = _uint64_deinterleave(ui64)
    lat_grid = 1 << (32-int(precision/2))
    lon_grid = lat_grid >> (precision % 2)
    if precision <= 2:  # two or fewer bits: the expansion is the whole space
        return []
    ranges = []
    if lat & lat_grid:
        if lon & lon_grid:
            ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
            ranges.append((ui64, ui64 + (1 << (64-precision+2))))
            if precision % 2 == 0:
                # lat, lon = (1, 1) and even precision
                ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision+1))))
                if lat + lat_grid < 0xFFFFFFFF:
                    ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                    ui64 = _uint64_interleave(lat+lat_grid, lon)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                    ui64 = _uint64_interleave(lat+lat_grid, lon+lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
            else:
                # lat,lon = (1, 1) and odd precision
                if lat + lat_grid < 0xFFFFFFFF:
                    ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision+1))))
                    ui64 = _uint64_interleave(lat+lat_grid, lon+lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                ui64 = _uint64_interleave(lat, lon+lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision))))
                ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision))))
        else:
            ui64 = _uint64_interleave(lat-lat_grid, lon)
            ranges.append((ui64, ui64 + (1 << (64-precision+2))))
            if precision % 2 == 0:
                # lat,lon = (1, 0) and even precision
                # (the original comment said "odd" here, which contradicted
                # the branch condition)
                ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision+1))))
                if lat + lat_grid < 0xFFFFFFFF:
                    ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                    ui64 = _uint64_interleave(lat+lat_grid, lon)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                    ui64 = _uint64_interleave(lat+lat_grid, lon+lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
            else:
                # lat,lon = (1, 0) and odd precision
                if lat + lat_grid < 0xFFFFFFFF:
                    ui64 = _uint64_interleave(lat+lat_grid, lon)
                    ranges.append((ui64, ui64 + (1 << (64-precision+1))))
                    ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                ui64 = _uint64_interleave(lat, lon-lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision))))
                ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision))))
    else:
        if lon & lon_grid:
            ui64 = _uint64_interleave(lat, lon-lon_grid)
            ranges.append((ui64, ui64 + (1 << (64-precision+2))))
            if precision % 2 == 0:
                # lat,lon = (0, 1) and even precision
                ui64 = _uint64_interleave(lat, lon+lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision+1))))
                if lat > 0:
                    ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                    ui64 = _uint64_interleave(lat-lat_grid, lon)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                    ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
            else:
                # lat,lon = (0, 1) and odd precision
                if lat > 0:
                    ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision+1))))
                    ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                ui64 = _uint64_interleave(lat, lon+lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision))))
                ui64 = _uint64_interleave(lat+lat_grid, lon+lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision))))
        else:
            ui64 = _uint64_interleave(lat, lon)
            ranges.append((ui64, ui64 + (1 << (64-precision+2))))
            if precision % 2 == 0:
                # lat,lon = (0, 0) and even precision
                ui64 = _uint64_interleave(lat, lon-lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision+1))))
                if lat > 0:
                    ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                    ui64 = _uint64_interleave(lat-lat_grid, lon)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                    ui64 = _uint64_interleave(lat-lat_grid, lon+lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
            else:
                # lat,lon = (0, 0) and odd precision
                if lat > 0:
                    ui64 = _uint64_interleave(lat-lat_grid, lon)
                    ranges.append((ui64, ui64 + (1 << (64-precision+1))))
                    ui64 = _uint64_interleave(lat-lat_grid, lon-lon_grid)
                    ranges.append((ui64, ui64 + (1 << (64-precision))))
                ui64 = _uint64_interleave(lat, lon-lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision))))
                ui64 = _uint64_interleave(lat+lat_grid, lon-lon_grid)
                ranges.append((ui64, ui64 + (1 << (64-precision))))
    ranges.sort()
    # Merge ranges that touch end-to-end.
    shrink = []
    prev = None
    for i in ranges:
        if prev:
            if prev[1] != i[0]:
                shrink.append(prev)
                prev = i
            else:
                prev = (prev[0], i[1])
        else:
            prev = i
    shrink.append(prev)
    # Open-end ranges at the extremes of the key space.
    ranges = []
    for i in shrink:
        a, b = i
        if a == 0:
            a = None  # lowest possible value: no lower bound needed
        if b == 0x10000000000000000:
            b = None  # highest possible value: no upper bound needed
        ranges.append((a, b))
    return ranges
|
Vito2015/pyextend
|
pyextend/core/wrappers/timethis.py
|
timethis
|
python
|
def timethis(func):
    """Decorator that prints the wall-clock duration of each call to *func*."""
    func_module, func_name = func.__module__, func.__name__

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        started = _time_perf_counter()
        value = func(*args, **kwargs)
        finished = _time_perf_counter()
        print('timethis : <{}.{}> : {}'.format(func_module, func_name, finished - started))
        return value
    return wrapper
|
A decorator wrapper used for timing function calls.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/wrappers/timethis.py#L23-L34
| null |
# coding: utf-8
"""
pyextend.core.wrappers.timethis
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pyextend core wrappers timethis wrapper
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import sys
import time
import functools
__all__ = ['timethis']
if sys.version_info < (3, 3):
_time_perf_counter = time.clock
else:
_time_perf_counter = time.perf_counter
if __name__ == "__main__":
from math import sqrt
def compute_roots(nums):
result = []
result_append = result.append
for n in nums:
result_append(sqrt(n))
return result
@timethis
def test():
nums = range(100000)
for n in range(100):
r = compute_roots(nums)
test()
timethis(lambda: [x for x in range(100000)])()
|
Vito2015/pyextend
|
pyextend/core/wrappers/accepts.py
|
accepts
|
python
|
def accepts(exception=TypeError, **types):
    """Decorator factory that type-checks function arguments at call time.

    Each keyword in *types* names a parameter of the decorated function and
    maps it to a type, an attribute-name string (duck typing, e.g.
    '__iter__'), or a tuple of such alternatives (None in the tuple matches
    the value None).  A mismatch raises *exception*.

    Example::

        @accepts(a=int, b=('__iter__', None), c=str)
        def test(a, b=None, c=None): ...
    """
    def check_param(v, type_or_funcname):
        # A tuple means "any of these alternatives"; None entries match
        # the value None itself.
        if isinstance(type_or_funcname, tuple):
            results1 = [check_param(v, t) for t in type_or_funcname if t is not None]
            results2 = [v == t for t in type_or_funcname if t is None]
            return any(results1) or any(results2)
        is_type_instance, is_func_like = False, False
        try:
            is_type_instance = isinstance(v, type_or_funcname)
        except TypeError:
            pass
        if isinstance(type_or_funcname, str):
            if type_or_funcname == '__iter__' and isinstance(v, str) and version_info < (3,):
                # On py2, str has no `__iter__` attribute but is iterable
                # (`iter(s)` works), so treat it as iterable explicitly.
                is_func_like = True
            else:
                is_func_like = hasattr(v, type_or_funcname)
        return is_type_instance or is_func_like

    def check_accepts(f):
        assert len(types) <= f.__code__.co_argcount,\
            'accept number of arguments not equal with function number of arguments in "{}"'.format(f.__name__)

        @functools.wraps(f)
        def new_f(*args, **kwargs):
            # BUG FIX: the original did `del types[...]` after a successful
            # positional check, permanently removing the spec from the shared
            # closure dict — every call after the first went unchecked.
            for i, v in enumerate(args):
                name = f.__code__.co_varnames[i]
                if name in types and not check_param(v, types[name]):
                    raise exception("function '%s' arg '%s'=%r does not match %s" %
                                    (f.__name__, name, v, types[name]))
            for k, v in kwargs.items():
                if k in types and not check_param(v, types[k]):
                    raise exception("function '%s' arg '%s'=%r does not match %s" % (f.__name__, k, v, types[k]))
            return f(*args, **kwargs)
        return new_f
    return check_accepts
|
A wrapper of function for checking function parameters type
Example 1:
@accepts(a=int, b='__iter__', c=str)
def test(a, b=None, c=None):
print('accepts OK')
test(13, b=[], c='abc') -- OK
test('aaa', b=(), c='abc') --Failed
Example 2:
@accepts(a=int, b=('__iter__', None), c=str)
def test(a, b=None, c=None):
print('accepts OK')
test(13, b=[], c='abc') -- OK
test(13, b=None, c='abc') -- OK
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/wrappers/accepts.py#L15-L77
| null |
# coding: utf-8
"""
pyextend.core.wrappers.accepts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pyextend core wrappers accepts wrapper
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import functools
from .system import version_info
|
Vito2015/pyextend
|
pyextend/core/wrappers/singleton.py
|
singleton
|
python
|
def singleton(cls, *args, **kwargs):
    """Class decorator: the decorated class yields one shared instance.

    Constructor arguments are fixed at decoration time; the instance is
    created lazily on the first call.
    """
    cache = {}

    def get_instance():
        if cls not in cache:
            cache[cls] = cls(*args, **kwargs)
        return cache[cls]
    return get_instance
|
Class singleton decorator: makes the decorated class return a single shared instance.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/wrappers/singleton.py#L12-L20
| null |
# coding: utf-8
"""
pyextend.core.wrappers.singleton
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pyextend core wrappers singleton wrapper
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
|
Vito2015/pyextend
|
pyextend/core/json2csv.py
|
json2csv
|
python
|
def json2csv(json_str, show_header=False, separator='\t'):
    """Format a JSON object string as one separator-delimited value line.

    :param json_str: JSON object string
    :param show_header: when True, also return the joined column names
    :param separator: column separator
    :return: the value line, or (header, value line) when show_header is True
    """
    obj = json.loads(json_str)
    columns = list(obj.keys())
    row = separator.join(str(obj.get(col)) for col in columns)
    if not show_header:
        return row
    return separator.join(columns), row
|
Format a json string to csv like.
:param json_str: json object string
:param show_header: can returns csv header line
:param separator: csv column format separator
:return: if show_header=False: a string like csv formatting;
if show_header=True: a tuple (header, csv string)
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/json2csv.py#L6-L23
| null |
#!/usr/bin/env python
# coding: utf-8
import json
# if __name__ == '__main__':
# source = [
# '{"dbm": 0, "created": "2016-03-21 08:07:11", "registered": false, "lat": 30.303772000000002, "radio": "LTE", '
# '"key": "1cc0000000058b304cf3881", "provider": "network", "imei": "867323022973331", "lng": 120.31240999999997}',
# '{"dbm": 0, "created": "2016-03-04 06:00:46", "registered": false, "lat": 30.259325583333339, "radio": "LTE",'
# ' "key": "1cc00000000681e0b0c5a06", "provider": "network", "imei": "867831028141570", "lng": 120.19723466666666}',
# '{"dbm": 0, "created": "2016-03-08 07:50:40", "registered": false, "lat": 28.959314250000002, "radio": "LTE",'
# ' "key": "1cc0000000057090bc0ea02", "provider": "network", "imei": "869634022225904", "lng": 118.88883787500001}',
# ]
#
# print (json2csv(source[0], show_header=True)[0])
# for line in source:
# print(json2csv(line))
|
Vito2015/pyextend
|
pyextend/core/wrappers/timeout.py
|
timeout
|
python
|
def timeout(seconds, error_message=None):
    """Decorator factory: abort the call with TimeoutError after *seconds*.

    Implemented with SIGALRM, so it only takes effect on Unix-like platforms
    (the sysx.platform wrapper falls back to the undecorated function
    elsewhere).  *error_message* overrides the default timeout message.
    """
    def decorated(func):
        # NOTE(review): this local is shadowed — the `global result`
        # statements below target module scope, so this binding is never
        # read or updated.  Confirm whether module-level `result` was meant.
        result = ""

        def _handle_timeout(signum, frame):
            # SIGALRM handler: log the caller's stack frame, then raise.
            errmsg = error_message or 'Timeout: The action <%s> is timeout!' % func.__name__
            global result
            result = None
            import inspect
            stack_frame = inspect.stack()[4]
            file_name = os.path.basename(stack_frame[1])
            line_no = stack_frame[2]
            method_name = stack_frame[3]
            code_text = ','.join(stack_frame[4])
            stack_info = 'Stack: %s, %s:%s >%s' % (method_name, file_name, line_no, code_text)
            sys.stderr.write(errmsg+'\n')
            sys.stderr.write(stack_info+'\n')
            raise TimeoutError(errmsg)

        @sysx.platform(sysx.UNIX_LIKE, case_false_wraps=func)
        def wrapper(*args, **kwargs):
            global result
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm, even if func raised.
                signal.alarm(0)
            return result
        return functools.wraps(func)(wrapper)
    return decorated
|
Timeout checking just for Linux-like platform, not working in Windows platform.
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/wrappers/timeout.py#L25-L60
| null |
# coding: utf-8
"""
pyextend.core.wrappers.timeout
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pyextend core wrappers timeout wrapper
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
import os
import sys
import signal
import functools
from . import system as sysx
if sysx.version_info < (3,):
class TimeoutError(OSError):
""" Timeout expired. """
def __init__(self, *args, **kwargs): # real signature unknown
pass
|
Vito2015/pyextend
|
pyextend/core/itertools.py
|
unpack
|
python
|
def unpack(iterable, count, fill=None):
    """Take up to *count* items from *iterable* into a tuple, padding with *fill*.

    Example::

        unpack('abc', 2)  ->  ('a', 'b')
        unpack('abc', 4)  ->  ('a', 'b', 'c', None)
    """
    # zip() stops at the shorter input, so at most *count* items are consumed
    # without materializing the whole iterable.  (The original enumerated the
    # entire input into a list and delegated the trivial padding to merge().)
    taken = [item for _, item in zip(range(count), iterable)]
    taken.extend(fill for _ in range(count - len(taken)))
    return tuple(taken)
|
The iter data unpack function.
Example 1:
In[1]: source = 'abc'
In[2]: a, b = safe_unpack(source, 2)
In[3]: print(a, b)
a b
Example 2:
In[1]: source = 'abc'
In[2]: a, b, c, d = safe_unpack(source, 4)
In[3]: print(a, b, c, d)
a b None None
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/itertools.py#L19-L41
| null |
# coding: utf-8
"""
pyextend.core.itertools
~~~~~~~~~~~~~~~~~~~~~
pyextend core string extension tools
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
__all__ = ['unpack', 'merge']
# from pyextend.core.wrappers import accepts
from .wrappers import accepts
@accepts(iterable='__iter__', count=int)
@accepts(iterable1='__iter__')
def merge(iterable1, *args):
"""
Returns an type of iterable1 value, which merged after iterable1 used *args
:exception TypeError: if any parameter type of args not equals type(iterable1)
Example 1:
source = ['a', 'b', 'c']
result = merge(source, [1, 2, 3])
self.assertEqual(result, ['a', 'b', 'c', 1, 2, 3])
result = merge(source, [1, 2, 3], ['x', 'y', 'z'])
self.assertEqual(result, ['a', 'b', 'c', 1, 2, 3, 'x', 'y', 'z'])
Example 2:
source = 'abc'
result = merge(source, '123')
self.assertEqual(result, 'abc123')
result = merge(source, '123', 'xyz')
self.assertEqual(result, 'abc123xyz')
Example 3:
source = ('a', 'b', 'c')
result = merge(source, (1, 2, 3))
self.assertEqual(result, ('a', 'b', 'c', 1, 2, 3))
result = merge(source, (1, 2, 3), ('x', 'y', 'z'))
self.assertEqual(result, ('a', 'b', 'c', 1, 2, 3, 'x', 'y', 'z'))
Example 4:
source = {'a': 1, 'b': 2, 'c': 3}
result = merge(source, {'x': 'm', 'y': 'n'}, {'z': '1'})
self.assertEqual(result, {'a': 1, 'b': 2, 'c': 3, 'x': 'm', 'y': 'n', 'z': '1'})
"""
result_list = list(iterable1) if not isinstance(iterable1, dict) else eval('list(iterable1.items())')
for i, other in enumerate(args, start=1):
if not isinstance(other, type(iterable1)):
raise TypeError('the parameter type of index {} not equals type of index 0'.format(i))
if not isinstance(other, dict):
result_list[len(result_list):len(result_list)] = list(other)
else:
result_list[len(result_list):len(result_list)] = list(other.items())
if isinstance(iterable1, str):
return ''.join(result_list)
elif isinstance(iterable1, tuple):
return tuple(result_list)
elif isinstance(iterable1, dict):
return dict(result_list)
else:
return result_list
|
Vito2015/pyextend
|
pyextend/core/itertools.py
|
merge
|
python
|
def merge(iterable1, *args):
    """Concatenate *args* onto *iterable1*, returning iterable1's type.

    Works for list, tuple, str and dict (dicts are merged key-by-key).

    :exception TypeError: if any arg's type does not equal type(iterable1)

    Example::

        merge('abc', '123')                ->  'abc123'
        merge({'a': 1}, {'b': 2})          ->  {'a': 1, 'b': 2}
    """
    # The original built this list via eval('list(iterable1.items())') —
    # a needless (and unsafe-looking) indirection for a direct method call.
    if isinstance(iterable1, dict):
        result_list = list(iterable1.items())
    else:
        result_list = list(iterable1)
    for i, other in enumerate(args, start=1):
        if not isinstance(other, type(iterable1)):
            raise TypeError('the parameter type of index {} not equals type of index 0'.format(i))
        if isinstance(other, dict):
            result_list.extend(other.items())
        else:
            result_list.extend(other)
    if isinstance(iterable1, str):
        return ''.join(result_list)
    if isinstance(iterable1, tuple):
        return tuple(result_list)
    if isinstance(iterable1, dict):
        return dict(result_list)
    return result_list
|
Returns an type of iterable1 value, which merged after iterable1 used *args
:exception TypeError: if any parameter type of args not equals type(iterable1)
Example 1:
source = ['a', 'b', 'c']
result = merge(source, [1, 2, 3])
self.assertEqual(result, ['a', 'b', 'c', 1, 2, 3])
result = merge(source, [1, 2, 3], ['x', 'y', 'z'])
self.assertEqual(result, ['a', 'b', 'c', 1, 2, 3, 'x', 'y', 'z'])
Example 2:
source = 'abc'
result = merge(source, '123')
self.assertEqual(result, 'abc123')
result = merge(source, '123', 'xyz')
self.assertEqual(result, 'abc123xyz')
Example 3:
source = ('a', 'b', 'c')
result = merge(source, (1, 2, 3))
self.assertEqual(result, ('a', 'b', 'c', 1, 2, 3))
result = merge(source, (1, 2, 3), ('x', 'y', 'z'))
self.assertEqual(result, ('a', 'b', 'c', 1, 2, 3, 'x', 'y', 'z'))
Example 4:
source = {'a': 1, 'b': 2, 'c': 3}
result = merge(source, {'x': 'm', 'y': 'n'}, {'z': '1'})
self.assertEqual(result, {'a': 1, 'b': 2, 'c': 3, 'x': 'm', 'y': 'n', 'z': '1'})
|
train
|
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/itertools.py#L45-L99
| null |
# coding: utf-8
"""
pyextend.core.itertools
~~~~~~~~~~~~~~~~~~~~~
pyextend core string extension tools
:copyright: (c) 2016 by Vito.
:license: GNU, see LICENSE for more details.
"""
__all__ = ['unpack', 'merge']
# from pyextend.core.wrappers import accepts
from .wrappers import accepts
@accepts(iterable='__iter__', count=int)
def unpack(iterable, count, fill=None):
"""
The iter data unpack function.
Example 1:
In[1]: source = 'abc'
In[2]: a, b = safe_unpack(source, 2)
In[3]: print(a, b)
a b
Example 2:
In[1]: source = 'abc'
In[2]: a, b, c, d = safe_unpack(source, 4)
In[3]: print(a, b, c, d)
a b None None
"""
iterable = list(enumerate(iterable))
cnt = count if count <= len(iterable) else len(iterable)
results = [iterable[i][1] for i in range(cnt)]
# results[len(results):len(results)] = [fill for i in range(count-cnt)]
results = merge(results, [fill for i in range(count-cnt)])
return tuple(results)
@accepts(iterable1='__iter__')
|
datakortet/dkfileutils
|
dkfileutils/path.py
|
Path.touch
|
python
|
def touch(self, mode=0o666, exist_ok=True):
    """Create this file with access mode *mode* if it doesn't exist.

    With exist_ok=True an existing file only gets its modification time
    bumped; with exist_ok=False creation fails when the file already
    exists (O_EXCL).  Based on CPython's pathlib implementation.
    """
    if exist_ok:
        # First try to bump modification time
        # Implementation note: GNU touch uses the UTIME_NOW option of
        # the utimensat() / futimens() functions.
        try:
            os.utime(self, None)
        except OSError:
            # Avoid exception chaining
            pass
        else:
            return
    flags = os.O_CREAT | os.O_WRONLY
    if not exist_ok:
        flags |= os.O_EXCL
    fd = os.open(self, flags, mode)
    os.close(fd)
|
Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py)
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/path.py#L99-L121
| null |
class Path(str):
"""Poor man's pathlib.
"""
def __new__(cls, *args, **kw):
if isinstance(args[0], Path):
return str.__new__(cls, str(args[0]), **kw)
else:
return str.__new__(cls, os.path.normcase(args[0]), **kw)
def __div__(self, other):
return Path(
os.path.normcase(
os.path.normpath(
os.path.join(self, other)
)
)
)
__truediv__ = __div__
@doc(os.unlink)
def unlink(self):
os.unlink(self)
def open(self, mode='r'):
return open(self, mode)
def read(self, mode='r'):
with self.open(mode) as fp:
return fp.read()
def write(self, txt, mode='w'):
with self.open(mode) as fp:
fp.write(txt)
def append(self, txt, mode='a'):
with self.open(mode) as fp:
fp.write(txt)
def __iter__(self):
for root, dirs, files in os.walk(self):
dotdirs = [d for d in dirs if d.startswith('.')]
for d in dotdirs:
dirs.remove(d)
dotfiles = [d for d in files if d.startswith('.')]
for d in dotfiles:
files.remove(d)
for fname in files:
yield Path(os.path.join(root, fname))
def __contains__(self, item):
if self.isdir():
return item in self.listdir()
return super(Path, self).__contains__(item)
@doc(shutil.rmtree)
def rmtree(self, subdir=None):
if subdir is not None:
shutil.rmtree(self / subdir, ignore_errors=True)
else:
shutil.rmtree(self, ignore_errors=True)
def contents(self):
res = [d.relpath(self) for d in self.glob('**/*')]
res.sort()
return res
@classmethod
def curdir(cls):
"""Initialize a Path object on the current directory.
"""
return cls(os.getcwd())
def touch(self, mode=0o666, exist_ok=True):
"""Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py)
"""
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
os.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = os.open(self, flags, mode)
os.close(fd)
def glob(self, pat):
"""`pat` can be an extended glob pattern, e.g. `'**/*.less'`
This code handles negations similarly to node.js' minimatch, i.e.
a leading `!` will negate the entire pattern.
"""
r = ""
negate = int(pat.startswith('!'))
i = negate
while i < len(pat):
if pat[i:i + 3] == '**/':
r += "(?:.*/)?"
i += 3
elif pat[i] == "*":
r += "[^/]*"
i += 1
elif pat[i] == ".":
r += "[.]"
i += 1
elif pat[i] == "?":
r += "."
i += 1
else:
r += pat[i]
i += 1
r += r'\Z(?ms)'
# print '\n\npat', pat
# print 'regex:', r
# print [s.relpath(self).replace('\\', '/') for s in self]
rx = re.compile(r)
def match(d):
m = rx.match(d)
return not m if negate else m
return [s for s in self if match(s.relpath(self).replace('\\', '/'))]
@doc(os.path.abspath)
def abspath(self):
return Path(os.path.abspath(self))
absolute = abspath # pathlib
def drive(self):
"""Return the drive of `self`.
"""
return self.splitdrive()[0]
def drivepath(self):
"""The path local to this drive (i.e. remove drive letter).
"""
return self.splitdrive()[1]
@doc(os.path.basename)
def basename(self):
return Path(os.path.basename(self))
@doc(os.path.commonprefix)
def commonprefix(self, *args):
return os.path.commonprefix([str(self)] + [str(a) for a in args])
@doc(os.path.dirname)
def dirname(self):
return Path(os.path.dirname(self))
@doc(os.path.exists)
def exists(self):
return os.path.exists(self)
@doc(os.path.expanduser)
def expanduser(self):
return Path(os.path.expanduser(self))
@doc(os.path.expandvars)
def expandvars(self):
return Path(os.path.expandvars(self))
@doc(os.path.getatime)
def getatime(self):
return os.path.getatime(self)
@doc(os.path.getctime)
def getctime(self):
return os.path.getctime(self)
@doc(os.path.getmtime)
def getmtime(self):
return os.path.getmtime(self)
@doc(os.path.getsize)
def getsize(self):
return os.path.getsize(self)
@doc(os.path.isabs)
def isabs(self):
return os.path.isabs(self)
@doc(os.path.isdir)
def isdir(self, *args, **kw):
return os.path.isdir(self, *args, **kw)
@doc(os.path.isfile)
def isfile(self):
return os.path.isfile(self)
@doc(os.path.islink)
def islink(self):
return os.path.islink(self)
@doc(os.path.ismount)
def ismount(self):
return os.path.ismount(self)
@doc(os.path.join)
def join(self, *args):
return Path(os.path.join(self, *args))
@doc(os.path.lexists)
def lexists(self):
return os.path.lexists(self)
@doc(os.path.normcase)
def normcase(self):
return Path(os.path.normcase(self))
@doc(os.path.normpath)
def normpath(self):
return Path(os.path.normpath(str(self)))
@doc(os.path.realpath)
def realpath(self):
return Path(os.path.realpath(self))
@doc(os.path.relpath)
def relpath(self, other=""):
return Path(os.path.relpath(str(self), str(other)))
@doc(os.path.split)
def split(self, sep=None, maxsplit=-1):
# some heuristics to determine if this is a str.split call or
# a os.split call...
sval = str(self)
if sep is not None or ' ' in sval:
return sval.split(sep or ' ', maxsplit)
return os.path.split(self)
def parts(self):
res = re.split(r"[\\/]", self)
if res and os.path.splitdrive(res[0]) == (res[0], ''):
res[0] += os.path.sep
return res
def parent_iter(self):
parts = self.abspath().normpath().normcase().parts()
for i in range(1, len(parts)):
yield Path(os.path.join(*parts[:-i]))
@property
def parents(self):
return list(self.parent_iter())
@property
def parent(self):
return self.parents[0]
@doc(os.path.splitdrive)
def splitdrive(self):
drive, pth = os.path.splitdrive(self)
return drive, Path(pth)
@doc(os.path.splitext)
def splitext(self):
return os.path.splitext(self)
@property
def ext(self):
return self.splitext()[1]
def switchext(self, ext):
return self.splitext()[0] + ext
if hasattr(os.path, 'splitunc'): # pragma: nocover
@doc(os.path.splitunc)
def splitunc(self):
return os.path.splitunc(self)
@doc(os.access)
def access(self, *args, **kw):
return os.access(self, *args, **kw)
@doc(os.chdir)
def chdir(self):
return os.chdir(self)
@contextmanager
def cd(self):
cwd = os.getcwd()
try:
self.chdir()
yield self
finally:
os.chdir(cwd)
@doc(os.chmod)
def chmod(self, *args, **kw):
return os.chmod(self, *args, **kw)
def list(self, filterfn=lambda x: True):
"""Return all direct descendands of directory `self` for which
`filterfn` returns True.
"""
return [self / p for p in self.listdir() if filterfn(self / p)]
@doc(os.listdir)
def listdir(self):
return [Path(p) for p in os.listdir(self)]
def subdirs(self):
"""Return all direct sub-directories.
"""
return self.list(lambda p: p.isdir())
def files(self):
"""Return all files in directory.
"""
return self.list(lambda p: p.isfile())
@doc(os.lstat)
def lstat(self):
return os.lstat(self)
@doc(os.makedirs)
def makedirs(self, path=None, mode=0o777):
pth = os.path.join(self, path) if path else self
try:
os.makedirs(pth, mode)
except OSError:
pass
return Path(pth)
@doc(os.mkdir)
def mkdir(self, path, mode=0o777):
pth = os.path.join(self, path)
os.mkdir(pth, mode)
return Path(pth)
@doc(os.remove)
def remove(self):
return os.remove(self)
def rm(self, fname=None):
"""Remove a file, don't raise exception if file does not exist.
"""
if fname is not None:
return (self / fname).rm()
try:
self.remove()
except OSError:
pass
@doc(os.removedirs)
def removedirs(self):
return os.removedirs(self)
@doc(shutil.move)
def move(self, dst):
return shutil.move(self, dst)
@doc(os.rename)
def rename(self, *args, **kw):
return os.rename(self, *args, **kw)
@doc(os.renames)
def renames(self, *args, **kw):
return os.renames(self, *args, **kw)
@doc(os.rmdir)
def rmdir(self):
return os.rmdir(self)
if hasattr(os, 'startfile'): # pragma: nocover
@doc(os.startfile)
def startfile(self, *args, **kw):
return os.startfile(self, *args, **kw)
@doc(os.stat)
def stat(self, *args, **kw):
return os.stat(self, *args, **kw)
@doc(os.utime)
def utime(self, time=None):
os.utime(self, time)
return self.stat()
def __add__(self, other):
return Path(str(self) + str(other))
|
datakortet/dkfileutils
|
dkfileutils/path.py
|
Path.glob
|
python
|
def glob(self, pat):
r = ""
negate = int(pat.startswith('!'))
i = negate
while i < len(pat):
if pat[i:i + 3] == '**/':
r += "(?:.*/)?"
i += 3
elif pat[i] == "*":
r += "[^/]*"
i += 1
elif pat[i] == ".":
r += "[.]"
i += 1
elif pat[i] == "?":
r += "."
i += 1
else:
r += pat[i]
i += 1
r += r'\Z(?ms)'
# print '\n\npat', pat
# print 'regex:', r
# print [s.relpath(self).replace('\\', '/') for s in self]
rx = re.compile(r)
def match(d):
m = rx.match(d)
return not m if negate else m
return [s for s in self if match(s.relpath(self).replace('\\', '/'))]
|
`pat` can be an extended glob pattern, e.g. `'**/*.less'`
This code handles negations similarly to node.js' minimatch, i.e.
a leading `!` will negate the entire pattern.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/path.py#L123-L158
| null |
class Path(str):
"""Poor man's pathlib.
"""
def __new__(cls, *args, **kw):
if isinstance(args[0], Path):
return str.__new__(cls, str(args[0]), **kw)
else:
return str.__new__(cls, os.path.normcase(args[0]), **kw)
def __div__(self, other):
return Path(
os.path.normcase(
os.path.normpath(
os.path.join(self, other)
)
)
)
__truediv__ = __div__
@doc(os.unlink)
def unlink(self):
os.unlink(self)
def open(self, mode='r'):
return open(self, mode)
def read(self, mode='r'):
with self.open(mode) as fp:
return fp.read()
def write(self, txt, mode='w'):
with self.open(mode) as fp:
fp.write(txt)
def append(self, txt, mode='a'):
with self.open(mode) as fp:
fp.write(txt)
def __iter__(self):
for root, dirs, files in os.walk(self):
dotdirs = [d for d in dirs if d.startswith('.')]
for d in dotdirs:
dirs.remove(d)
dotfiles = [d for d in files if d.startswith('.')]
for d in dotfiles:
files.remove(d)
for fname in files:
yield Path(os.path.join(root, fname))
def __contains__(self, item):
if self.isdir():
return item in self.listdir()
return super(Path, self).__contains__(item)
@doc(shutil.rmtree)
def rmtree(self, subdir=None):
if subdir is not None:
shutil.rmtree(self / subdir, ignore_errors=True)
else:
shutil.rmtree(self, ignore_errors=True)
def contents(self):
res = [d.relpath(self) for d in self.glob('**/*')]
res.sort()
return res
@classmethod
def curdir(cls):
"""Initialize a Path object on the current directory.
"""
return cls(os.getcwd())
def touch(self, mode=0o666, exist_ok=True):
"""Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py)
"""
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
os.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = os.open(self, flags, mode)
os.close(fd)
@doc(os.path.abspath)
def abspath(self):
return Path(os.path.abspath(self))
absolute = abspath # pathlib
def drive(self):
"""Return the drive of `self`.
"""
return self.splitdrive()[0]
def drivepath(self):
"""The path local to this drive (i.e. remove drive letter).
"""
return self.splitdrive()[1]
@doc(os.path.basename)
def basename(self):
return Path(os.path.basename(self))
@doc(os.path.commonprefix)
def commonprefix(self, *args):
return os.path.commonprefix([str(self)] + [str(a) for a in args])
@doc(os.path.dirname)
def dirname(self):
return Path(os.path.dirname(self))
@doc(os.path.exists)
def exists(self):
return os.path.exists(self)
@doc(os.path.expanduser)
def expanduser(self):
return Path(os.path.expanduser(self))
@doc(os.path.expandvars)
def expandvars(self):
return Path(os.path.expandvars(self))
@doc(os.path.getatime)
def getatime(self):
return os.path.getatime(self)
@doc(os.path.getctime)
def getctime(self):
return os.path.getctime(self)
@doc(os.path.getmtime)
def getmtime(self):
return os.path.getmtime(self)
@doc(os.path.getsize)
def getsize(self):
return os.path.getsize(self)
@doc(os.path.isabs)
def isabs(self):
return os.path.isabs(self)
@doc(os.path.isdir)
def isdir(self, *args, **kw):
return os.path.isdir(self, *args, **kw)
@doc(os.path.isfile)
def isfile(self):
return os.path.isfile(self)
@doc(os.path.islink)
def islink(self):
return os.path.islink(self)
@doc(os.path.ismount)
def ismount(self):
return os.path.ismount(self)
@doc(os.path.join)
def join(self, *args):
return Path(os.path.join(self, *args))
@doc(os.path.lexists)
def lexists(self):
return os.path.lexists(self)
@doc(os.path.normcase)
def normcase(self):
return Path(os.path.normcase(self))
@doc(os.path.normpath)
def normpath(self):
return Path(os.path.normpath(str(self)))
@doc(os.path.realpath)
def realpath(self):
return Path(os.path.realpath(self))
@doc(os.path.relpath)
def relpath(self, other=""):
return Path(os.path.relpath(str(self), str(other)))
@doc(os.path.split)
def split(self, sep=None, maxsplit=-1):
# some heuristics to determine if this is a str.split call or
# a os.split call...
sval = str(self)
if sep is not None or ' ' in sval:
return sval.split(sep or ' ', maxsplit)
return os.path.split(self)
def parts(self):
res = re.split(r"[\\/]", self)
if res and os.path.splitdrive(res[0]) == (res[0], ''):
res[0] += os.path.sep
return res
def parent_iter(self):
parts = self.abspath().normpath().normcase().parts()
for i in range(1, len(parts)):
yield Path(os.path.join(*parts[:-i]))
@property
def parents(self):
return list(self.parent_iter())
@property
def parent(self):
return self.parents[0]
@doc(os.path.splitdrive)
def splitdrive(self):
drive, pth = os.path.splitdrive(self)
return drive, Path(pth)
@doc(os.path.splitext)
def splitext(self):
return os.path.splitext(self)
@property
def ext(self):
return self.splitext()[1]
def switchext(self, ext):
return self.splitext()[0] + ext
if hasattr(os.path, 'splitunc'): # pragma: nocover
@doc(os.path.splitunc)
def splitunc(self):
return os.path.splitunc(self)
@doc(os.access)
def access(self, *args, **kw):
return os.access(self, *args, **kw)
@doc(os.chdir)
def chdir(self):
return os.chdir(self)
@contextmanager
def cd(self):
cwd = os.getcwd()
try:
self.chdir()
yield self
finally:
os.chdir(cwd)
@doc(os.chmod)
def chmod(self, *args, **kw):
return os.chmod(self, *args, **kw)
def list(self, filterfn=lambda x: True):
"""Return all direct descendands of directory `self` for which
`filterfn` returns True.
"""
return [self / p for p in self.listdir() if filterfn(self / p)]
@doc(os.listdir)
def listdir(self):
return [Path(p) for p in os.listdir(self)]
def subdirs(self):
"""Return all direct sub-directories.
"""
return self.list(lambda p: p.isdir())
def files(self):
"""Return all files in directory.
"""
return self.list(lambda p: p.isfile())
@doc(os.lstat)
def lstat(self):
return os.lstat(self)
@doc(os.makedirs)
def makedirs(self, path=None, mode=0o777):
pth = os.path.join(self, path) if path else self
try:
os.makedirs(pth, mode)
except OSError:
pass
return Path(pth)
@doc(os.mkdir)
def mkdir(self, path, mode=0o777):
pth = os.path.join(self, path)
os.mkdir(pth, mode)
return Path(pth)
@doc(os.remove)
def remove(self):
return os.remove(self)
def rm(self, fname=None):
"""Remove a file, don't raise exception if file does not exist.
"""
if fname is not None:
return (self / fname).rm()
try:
self.remove()
except OSError:
pass
@doc(os.removedirs)
def removedirs(self):
return os.removedirs(self)
@doc(shutil.move)
def move(self, dst):
return shutil.move(self, dst)
@doc(os.rename)
def rename(self, *args, **kw):
return os.rename(self, *args, **kw)
@doc(os.renames)
def renames(self, *args, **kw):
return os.renames(self, *args, **kw)
@doc(os.rmdir)
def rmdir(self):
return os.rmdir(self)
if hasattr(os, 'startfile'): # pragma: nocover
@doc(os.startfile)
def startfile(self, *args, **kw):
return os.startfile(self, *args, **kw)
@doc(os.stat)
def stat(self, *args, **kw):
return os.stat(self, *args, **kw)
@doc(os.utime)
def utime(self, time=None):
os.utime(self, time)
return self.stat()
def __add__(self, other):
return Path(str(self) + str(other))
|
datakortet/dkfileutils
|
dkfileutils/path.py
|
Path.list
|
python
|
def list(self, filterfn=lambda x: True):
return [self / p for p in self.listdir() if filterfn(self / p)]
|
Return all direct descendands of directory `self` for which
`filterfn` returns True.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/path.py#L329-L333
| null |
class Path(str):
"""Poor man's pathlib.
"""
def __new__(cls, *args, **kw):
if isinstance(args[0], Path):
return str.__new__(cls, str(args[0]), **kw)
else:
return str.__new__(cls, os.path.normcase(args[0]), **kw)
def __div__(self, other):
return Path(
os.path.normcase(
os.path.normpath(
os.path.join(self, other)
)
)
)
__truediv__ = __div__
@doc(os.unlink)
def unlink(self):
os.unlink(self)
def open(self, mode='r'):
return open(self, mode)
def read(self, mode='r'):
with self.open(mode) as fp:
return fp.read()
def write(self, txt, mode='w'):
with self.open(mode) as fp:
fp.write(txt)
def append(self, txt, mode='a'):
with self.open(mode) as fp:
fp.write(txt)
def __iter__(self):
for root, dirs, files in os.walk(self):
dotdirs = [d for d in dirs if d.startswith('.')]
for d in dotdirs:
dirs.remove(d)
dotfiles = [d for d in files if d.startswith('.')]
for d in dotfiles:
files.remove(d)
for fname in files:
yield Path(os.path.join(root, fname))
def __contains__(self, item):
if self.isdir():
return item in self.listdir()
return super(Path, self).__contains__(item)
@doc(shutil.rmtree)
def rmtree(self, subdir=None):
if subdir is not None:
shutil.rmtree(self / subdir, ignore_errors=True)
else:
shutil.rmtree(self, ignore_errors=True)
def contents(self):
res = [d.relpath(self) for d in self.glob('**/*')]
res.sort()
return res
@classmethod
def curdir(cls):
"""Initialize a Path object on the current directory.
"""
return cls(os.getcwd())
def touch(self, mode=0o666, exist_ok=True):
"""Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py)
"""
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
os.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = os.open(self, flags, mode)
os.close(fd)
def glob(self, pat):
"""`pat` can be an extended glob pattern, e.g. `'**/*.less'`
This code handles negations similarly to node.js' minimatch, i.e.
a leading `!` will negate the entire pattern.
"""
r = ""
negate = int(pat.startswith('!'))
i = negate
while i < len(pat):
if pat[i:i + 3] == '**/':
r += "(?:.*/)?"
i += 3
elif pat[i] == "*":
r += "[^/]*"
i += 1
elif pat[i] == ".":
r += "[.]"
i += 1
elif pat[i] == "?":
r += "."
i += 1
else:
r += pat[i]
i += 1
r += r'\Z(?ms)'
# print '\n\npat', pat
# print 'regex:', r
# print [s.relpath(self).replace('\\', '/') for s in self]
rx = re.compile(r)
def match(d):
m = rx.match(d)
return not m if negate else m
return [s for s in self if match(s.relpath(self).replace('\\', '/'))]
@doc(os.path.abspath)
def abspath(self):
return Path(os.path.abspath(self))
absolute = abspath # pathlib
def drive(self):
"""Return the drive of `self`.
"""
return self.splitdrive()[0]
def drivepath(self):
"""The path local to this drive (i.e. remove drive letter).
"""
return self.splitdrive()[1]
@doc(os.path.basename)
def basename(self):
return Path(os.path.basename(self))
@doc(os.path.commonprefix)
def commonprefix(self, *args):
return os.path.commonprefix([str(self)] + [str(a) for a in args])
@doc(os.path.dirname)
def dirname(self):
return Path(os.path.dirname(self))
@doc(os.path.exists)
def exists(self):
return os.path.exists(self)
@doc(os.path.expanduser)
def expanduser(self):
return Path(os.path.expanduser(self))
@doc(os.path.expandvars)
def expandvars(self):
return Path(os.path.expandvars(self))
@doc(os.path.getatime)
def getatime(self):
return os.path.getatime(self)
@doc(os.path.getctime)
def getctime(self):
return os.path.getctime(self)
@doc(os.path.getmtime)
def getmtime(self):
return os.path.getmtime(self)
@doc(os.path.getsize)
def getsize(self):
return os.path.getsize(self)
@doc(os.path.isabs)
def isabs(self):
return os.path.isabs(self)
@doc(os.path.isdir)
def isdir(self, *args, **kw):
return os.path.isdir(self, *args, **kw)
@doc(os.path.isfile)
def isfile(self):
return os.path.isfile(self)
@doc(os.path.islink)
def islink(self):
return os.path.islink(self)
@doc(os.path.ismount)
def ismount(self):
return os.path.ismount(self)
@doc(os.path.join)
def join(self, *args):
return Path(os.path.join(self, *args))
@doc(os.path.lexists)
def lexists(self):
return os.path.lexists(self)
@doc(os.path.normcase)
def normcase(self):
return Path(os.path.normcase(self))
@doc(os.path.normpath)
def normpath(self):
return Path(os.path.normpath(str(self)))
@doc(os.path.realpath)
def realpath(self):
return Path(os.path.realpath(self))
@doc(os.path.relpath)
def relpath(self, other=""):
return Path(os.path.relpath(str(self), str(other)))
@doc(os.path.split)
def split(self, sep=None, maxsplit=-1):
# some heuristics to determine if this is a str.split call or
# a os.split call...
sval = str(self)
if sep is not None or ' ' in sval:
return sval.split(sep or ' ', maxsplit)
return os.path.split(self)
def parts(self):
res = re.split(r"[\\/]", self)
if res and os.path.splitdrive(res[0]) == (res[0], ''):
res[0] += os.path.sep
return res
def parent_iter(self):
parts = self.abspath().normpath().normcase().parts()
for i in range(1, len(parts)):
yield Path(os.path.join(*parts[:-i]))
@property
def parents(self):
return list(self.parent_iter())
@property
def parent(self):
return self.parents[0]
@doc(os.path.splitdrive)
def splitdrive(self):
drive, pth = os.path.splitdrive(self)
return drive, Path(pth)
@doc(os.path.splitext)
def splitext(self):
return os.path.splitext(self)
@property
def ext(self):
return self.splitext()[1]
def switchext(self, ext):
return self.splitext()[0] + ext
if hasattr(os.path, 'splitunc'): # pragma: nocover
@doc(os.path.splitunc)
def splitunc(self):
return os.path.splitunc(self)
@doc(os.access)
def access(self, *args, **kw):
return os.access(self, *args, **kw)
@doc(os.chdir)
def chdir(self):
return os.chdir(self)
@contextmanager
def cd(self):
cwd = os.getcwd()
try:
self.chdir()
yield self
finally:
os.chdir(cwd)
@doc(os.chmod)
def chmod(self, *args, **kw):
return os.chmod(self, *args, **kw)
@doc(os.listdir)
def listdir(self):
return [Path(p) for p in os.listdir(self)]
def subdirs(self):
"""Return all direct sub-directories.
"""
return self.list(lambda p: p.isdir())
def files(self):
"""Return all files in directory.
"""
return self.list(lambda p: p.isfile())
@doc(os.lstat)
def lstat(self):
return os.lstat(self)
@doc(os.makedirs)
def makedirs(self, path=None, mode=0o777):
pth = os.path.join(self, path) if path else self
try:
os.makedirs(pth, mode)
except OSError:
pass
return Path(pth)
@doc(os.mkdir)
def mkdir(self, path, mode=0o777):
pth = os.path.join(self, path)
os.mkdir(pth, mode)
return Path(pth)
@doc(os.remove)
def remove(self):
return os.remove(self)
def rm(self, fname=None):
"""Remove a file, don't raise exception if file does not exist.
"""
if fname is not None:
return (self / fname).rm()
try:
self.remove()
except OSError:
pass
@doc(os.removedirs)
def removedirs(self):
return os.removedirs(self)
@doc(shutil.move)
def move(self, dst):
return shutil.move(self, dst)
@doc(os.rename)
def rename(self, *args, **kw):
return os.rename(self, *args, **kw)
@doc(os.renames)
def renames(self, *args, **kw):
return os.renames(self, *args, **kw)
@doc(os.rmdir)
def rmdir(self):
return os.rmdir(self)
if hasattr(os, 'startfile'): # pragma: nocover
@doc(os.startfile)
def startfile(self, *args, **kw):
return os.startfile(self, *args, **kw)
@doc(os.stat)
def stat(self, *args, **kw):
return os.stat(self, *args, **kw)
@doc(os.utime)
def utime(self, time=None):
os.utime(self, time)
return self.stat()
def __add__(self, other):
return Path(str(self) + str(other))
|
datakortet/dkfileutils
|
dkfileutils/path.py
|
Path.rm
|
python
|
def rm(self, fname=None):
if fname is not None:
return (self / fname).rm()
try:
self.remove()
except OSError:
pass
|
Remove a file, don't raise exception if file does not exist.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/path.py#L372-L380
| null |
class Path(str):
"""Poor man's pathlib.
"""
def __new__(cls, *args, **kw):
if isinstance(args[0], Path):
return str.__new__(cls, str(args[0]), **kw)
else:
return str.__new__(cls, os.path.normcase(args[0]), **kw)
def __div__(self, other):
return Path(
os.path.normcase(
os.path.normpath(
os.path.join(self, other)
)
)
)
__truediv__ = __div__
@doc(os.unlink)
def unlink(self):
os.unlink(self)
def open(self, mode='r'):
return open(self, mode)
def read(self, mode='r'):
with self.open(mode) as fp:
return fp.read()
def write(self, txt, mode='w'):
with self.open(mode) as fp:
fp.write(txt)
def append(self, txt, mode='a'):
with self.open(mode) as fp:
fp.write(txt)
def __iter__(self):
for root, dirs, files in os.walk(self):
dotdirs = [d for d in dirs if d.startswith('.')]
for d in dotdirs:
dirs.remove(d)
dotfiles = [d for d in files if d.startswith('.')]
for d in dotfiles:
files.remove(d)
for fname in files:
yield Path(os.path.join(root, fname))
def __contains__(self, item):
if self.isdir():
return item in self.listdir()
return super(Path, self).__contains__(item)
@doc(shutil.rmtree)
def rmtree(self, subdir=None):
if subdir is not None:
shutil.rmtree(self / subdir, ignore_errors=True)
else:
shutil.rmtree(self, ignore_errors=True)
def contents(self):
res = [d.relpath(self) for d in self.glob('**/*')]
res.sort()
return res
@classmethod
def curdir(cls):
"""Initialize a Path object on the current directory.
"""
return cls(os.getcwd())
def touch(self, mode=0o666, exist_ok=True):
"""Create this file with the given access mode, if it doesn't exist.
Based on:
https://github.com/python/cpython/blob/master/Lib/pathlib.py)
"""
if exist_ok:
# First try to bump modification time
# Implementation note: GNU touch uses the UTIME_NOW option of
# the utimensat() / futimens() functions.
try:
os.utime(self, None)
except OSError:
# Avoid exception chaining
pass
else:
return
flags = os.O_CREAT | os.O_WRONLY
if not exist_ok:
flags |= os.O_EXCL
fd = os.open(self, flags, mode)
os.close(fd)
def glob(self, pat):
"""`pat` can be an extended glob pattern, e.g. `'**/*.less'`
This code handles negations similarly to node.js' minimatch, i.e.
a leading `!` will negate the entire pattern.
"""
r = ""
negate = int(pat.startswith('!'))
i = negate
while i < len(pat):
if pat[i:i + 3] == '**/':
r += "(?:.*/)?"
i += 3
elif pat[i] == "*":
r += "[^/]*"
i += 1
elif pat[i] == ".":
r += "[.]"
i += 1
elif pat[i] == "?":
r += "."
i += 1
else:
r += pat[i]
i += 1
r += r'\Z(?ms)'
# print '\n\npat', pat
# print 'regex:', r
# print [s.relpath(self).replace('\\', '/') for s in self]
rx = re.compile(r)
def match(d):
m = rx.match(d)
return not m if negate else m
return [s for s in self if match(s.relpath(self).replace('\\', '/'))]
@doc(os.path.abspath)
def abspath(self):
return Path(os.path.abspath(self))
absolute = abspath # pathlib
def drive(self):
"""Return the drive of `self`.
"""
return self.splitdrive()[0]
def drivepath(self):
"""The path local to this drive (i.e. remove drive letter).
"""
return self.splitdrive()[1]
@doc(os.path.basename)
def basename(self):
return Path(os.path.basename(self))
@doc(os.path.commonprefix)
def commonprefix(self, *args):
return os.path.commonprefix([str(self)] + [str(a) for a in args])
@doc(os.path.dirname)
def dirname(self):
return Path(os.path.dirname(self))
@doc(os.path.exists)
def exists(self):
return os.path.exists(self)
@doc(os.path.expanduser)
def expanduser(self):
return Path(os.path.expanduser(self))
@doc(os.path.expandvars)
def expandvars(self):
return Path(os.path.expandvars(self))
@doc(os.path.getatime)
def getatime(self):
return os.path.getatime(self)
@doc(os.path.getctime)
def getctime(self):
return os.path.getctime(self)
@doc(os.path.getmtime)
def getmtime(self):
return os.path.getmtime(self)
@doc(os.path.getsize)
def getsize(self):
return os.path.getsize(self)
@doc(os.path.isabs)
def isabs(self):
return os.path.isabs(self)
@doc(os.path.isdir)
def isdir(self, *args, **kw):
return os.path.isdir(self, *args, **kw)
@doc(os.path.isfile)
def isfile(self):
return os.path.isfile(self)
@doc(os.path.islink)
def islink(self):
return os.path.islink(self)
@doc(os.path.ismount)
def ismount(self):
return os.path.ismount(self)
@doc(os.path.join)
def join(self, *args):
return Path(os.path.join(self, *args))
@doc(os.path.lexists)
def lexists(self):
return os.path.lexists(self)
@doc(os.path.normcase)
def normcase(self):
return Path(os.path.normcase(self))
@doc(os.path.normpath)
def normpath(self):
return Path(os.path.normpath(str(self)))
@doc(os.path.realpath)
def realpath(self):
return Path(os.path.realpath(self))
@doc(os.path.relpath)
def relpath(self, other=""):
return Path(os.path.relpath(str(self), str(other)))
@doc(os.path.split)
def split(self, sep=None, maxsplit=-1):
# some heuristics to determine if this is a str.split call or
# a os.split call...
sval = str(self)
if sep is not None or ' ' in sval:
return sval.split(sep or ' ', maxsplit)
return os.path.split(self)
def parts(self):
res = re.split(r"[\\/]", self)
if res and os.path.splitdrive(res[0]) == (res[0], ''):
res[0] += os.path.sep
return res
def parent_iter(self):
parts = self.abspath().normpath().normcase().parts()
for i in range(1, len(parts)):
yield Path(os.path.join(*parts[:-i]))
@property
def parents(self):
return list(self.parent_iter())
@property
def parent(self):
return self.parents[0]
@doc(os.path.splitdrive)
def splitdrive(self):
drive, pth = os.path.splitdrive(self)
return drive, Path(pth)
@doc(os.path.splitext)
def splitext(self):
return os.path.splitext(self)
@property
def ext(self):
return self.splitext()[1]
def switchext(self, ext):
return self.splitext()[0] + ext
if hasattr(os.path, 'splitunc'): # pragma: nocover
@doc(os.path.splitunc)
def splitunc(self):
return os.path.splitunc(self)
@doc(os.access)
def access(self, *args, **kw):
return os.access(self, *args, **kw)
@doc(os.chdir)
def chdir(self):
return os.chdir(self)
@contextmanager
def cd(self):
cwd = os.getcwd()
try:
self.chdir()
yield self
finally:
os.chdir(cwd)
@doc(os.chmod)
def chmod(self, *args, **kw):
return os.chmod(self, *args, **kw)
def list(self, filterfn=lambda x: True):
"""Return all direct descendands of directory `self` for which
`filterfn` returns True.
"""
return [self / p for p in self.listdir() if filterfn(self / p)]
@doc(os.listdir)
def listdir(self):
return [Path(p) for p in os.listdir(self)]
def subdirs(self):
"""Return all direct sub-directories.
"""
return self.list(lambda p: p.isdir())
def files(self):
"""Return all files in directory.
"""
return self.list(lambda p: p.isfile())
@doc(os.lstat)
def lstat(self):
return os.lstat(self)
@doc(os.makedirs)
def makedirs(self, path=None, mode=0o777):
pth = os.path.join(self, path) if path else self
try:
os.makedirs(pth, mode)
except OSError:
pass
return Path(pth)
@doc(os.mkdir)
def mkdir(self, path, mode=0o777):
pth = os.path.join(self, path)
os.mkdir(pth, mode)
return Path(pth)
@doc(os.remove)
def remove(self):
return os.remove(self)
@doc(os.removedirs)
def removedirs(self):
return os.removedirs(self)
@doc(shutil.move)
def move(self, dst):
return shutil.move(self, dst)
@doc(os.rename)
def rename(self, *args, **kw):
return os.rename(self, *args, **kw)
@doc(os.renames)
def renames(self, *args, **kw):
return os.renames(self, *args, **kw)
@doc(os.rmdir)
def rmdir(self):
return os.rmdir(self)
if hasattr(os, 'startfile'): # pragma: nocover
@doc(os.startfile)
def startfile(self, *args, **kw):
return os.startfile(self, *args, **kw)
@doc(os.stat)
def stat(self, *args, **kw):
return os.stat(self, *args, **kw)
@doc(os.utime)
def utime(self, time=None):
os.utime(self, time)
return self.stat()
def __add__(self, other):
return Path(str(self) + str(other))
|
datakortet/dkfileutils
|
dkfileutils/pfind.py
|
pfindall
|
python
|
def pfindall(path, *fnames):
wd = os.path.abspath(path)
assert os.path.isdir(wd)
def parents():
"""yield successive parent directories
"""
parent = wd
yield parent
while 1:
parent, dirname = os.path.split(parent)
if not dirname:
return
yield parent
for d in parents():
curdirlist = os.listdir(d)
for fname in fnames:
if fname in curdirlist:
yield fname, os.path.normcase(os.path.join(d, fname))
|
Find all fnames in the closest ancestor directory.
For the purposes of this function, we are our own closest ancestor.
I.e. given the structure::
.
`-- a
|-- b
| |-- c
| | `-- x.txt
| `-- x.txt
`-- y.txt
the call::
dict(pfindall('a/b/c', 'x.txt', 'y.txt'))
will return::
{
'x.txt': 'a/b/c/x.txt',
'y.txt': 'a/y.txt'
}
``a/b/x.txt`` is not returned, since ``a/b/c/x.txt`` is the "closest"
``x.txt`` when starting from ``a/b/c`` (note: pfindall only looks
"upwards", ie. towards the root).
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/pfind.py#L10-L56
|
[
"def parents():\n \"\"\"yield successive parent directories\n \"\"\"\n parent = wd\n yield parent\n while 1:\n parent, dirname = os.path.split(parent)\n if not dirname:\n return\n yield parent\n"
] |
#!/usr/bin/python
"""CLI usage: ``pfind path filename`` will find the closest ancestor directory
conataining filename (used for finding syncspec.txt and config files).
"""
from __future__ import print_function
import os
import sys
def pfind(path, *fnames):
"""Find the first fname in the closest ancestor directory.
For the purposes of this function, we are our own closest ancestor, i.e.
given the structure::
/srv
|-- myapp
| |-- __init__.py
| `-- myapp.py
`-- setup.py
then both ``pfind('/srv', 'setup.py')`` and
``pfind('/srv/myapp', 'setup.py')`` will return ``/srv/setup.py``
"""
for _fname, fpath in pfindall(path, *fnames):
return fpath
return None
if __name__ == "__main__": # pragma: nocover
_path, filename = sys.argv[1], sys.argv[2]
print(pfind(_path, filename))
|
datakortet/dkfileutils
|
dkfileutils/listfiles.py
|
read_skipfile
|
python
|
def read_skipfile(dirname='.', defaults=None):
if defaults is None:
defaults = ['Makefile', 'make.bat', 'atlassian-ide-plugin.xml']
try:
return defaults + open(
os.path.join(dirname, SKIPFILE_NAME)
).read().splitlines()
except IOError:
return defaults
|
The .skipfile should contain one entry per line,
listing files/directories that should be skipped by
:func:`list_files`.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/listfiles.py#L13-L25
| null |
# -*- coding: utf-8 -*-
"""List interesting files.
"""
from __future__ import print_function
import argparse
import os
from hashlib import md5
from .path import Path
SKIPFILE_NAME = '.skipfile'
def list_files(dirname='.', digest=True):
"""Yield (digest, fname) tuples for all interesting files
in `dirname`.
"""
skipdirs = ['__pycache__', '.git', '.svn', 'htmlcov', 'dist', 'build',
'.idea', 'tasks', 'static', 'media', 'data', 'migrations',
'.doctrees', '_static', 'node_modules', 'external',
'jobs', 'tryout', 'tmp', '_coverage',
]
skipexts = ['.pyc', '~', '.svg', '.txt', '.TTF', '.tmp', '.errmail',
'.email', '.bat', '.dll', '.exe', '.Dll', '.jpg', '.gif',
'.png', '.ico', '.db', '.md5']
dirname = str(dirname)
skipfiles = read_skipfile(dirname)
def clean_dirs(directories):
"""Remove directories that should be skipped.
"""
for d in directories:
if d.endswith('.egg-info'):
directories.remove(d)
for d in skipdirs:
if d in directories:
directories.remove(d)
def keep_file(filename, filepath):
"""Returns False if the file should be skipped.
"""
if filename.startswith('.'):
return False
if filepath in skipfiles:
return False
for ext in skipexts:
if filename.endswith(ext):
return False
return True
for root, dirs, files in os.walk(os.path.abspath(dirname)):
clean_dirs(dirs)
for fname in files:
relpth = os.path.relpath(
os.path.join(root, fname),
dirname
).replace('\\', '/')
parts = Path(relpth).parts()
if not keep_file(fname, relpth) or \
any(p.startswith('.') for p in parts):
continue
pth = os.path.join(dirname, relpth)
if digest:
yield md5(open(pth, 'rb').read()).hexdigest(), relpth
else:
yield relpth
def main(): # pragma: nocover
"""Print checksum and file name for all files in the directory.
"""
p = argparse.ArgumentParser(add_help="Recursively list interesting files.")
p.add_argument(
'directory', nargs="?", default="",
help="The directory to process (current dir if omitted)."
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="Increase verbosity."
)
args = p.parse_args()
args.curdir = os.getcwd()
if not args.directory:
args.direcotry = args.curdir
if args.verbose:
print(args)
for chsm, fname in list_files(args.directory):
print(chsm, fname)
if __name__ == "__main__": # pragma: nocover
main()
|
datakortet/dkfileutils
|
dkfileutils/listfiles.py
|
list_files
|
python
|
def list_files(dirname='.', digest=True):
skipdirs = ['__pycache__', '.git', '.svn', 'htmlcov', 'dist', 'build',
'.idea', 'tasks', 'static', 'media', 'data', 'migrations',
'.doctrees', '_static', 'node_modules', 'external',
'jobs', 'tryout', 'tmp', '_coverage',
]
skipexts = ['.pyc', '~', '.svg', '.txt', '.TTF', '.tmp', '.errmail',
'.email', '.bat', '.dll', '.exe', '.Dll', '.jpg', '.gif',
'.png', '.ico', '.db', '.md5']
dirname = str(dirname)
skipfiles = read_skipfile(dirname)
def clean_dirs(directories):
"""Remove directories that should be skipped.
"""
for d in directories:
if d.endswith('.egg-info'):
directories.remove(d)
for d in skipdirs:
if d in directories:
directories.remove(d)
def keep_file(filename, filepath):
"""Returns False if the file should be skipped.
"""
if filename.startswith('.'):
return False
if filepath in skipfiles:
return False
for ext in skipexts:
if filename.endswith(ext):
return False
return True
for root, dirs, files in os.walk(os.path.abspath(dirname)):
clean_dirs(dirs)
for fname in files:
relpth = os.path.relpath(
os.path.join(root, fname),
dirname
).replace('\\', '/')
parts = Path(relpth).parts()
if not keep_file(fname, relpth) or \
any(p.startswith('.') for p in parts):
continue
pth = os.path.join(dirname, relpth)
if digest:
yield md5(open(pth, 'rb').read()).hexdigest(), relpth
else:
yield relpth
|
Yield (digest, fname) tuples for all interesting files
in `dirname`.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/listfiles.py#L28-L82
|
[
"def read_skipfile(dirname='.', defaults=None):\n \"\"\"The .skipfile should contain one entry per line,\n listing files/directories that should be skipped by\n :func:`list_files`.\n \"\"\"\n if defaults is None:\n defaults = ['Makefile', 'make.bat', 'atlassian-ide-plugin.xml']\n try:\n return defaults + open(\n os.path.join(dirname, SKIPFILE_NAME)\n ).read().splitlines()\n except IOError:\n return defaults\n",
"def clean_dirs(directories):\n \"\"\"Remove directories that should be skipped.\n \"\"\"\n for d in directories:\n if d.endswith('.egg-info'):\n directories.remove(d)\n for d in skipdirs:\n if d in directories:\n directories.remove(d)\n",
"def keep_file(filename, filepath):\n \"\"\"Returns False if the file should be skipped.\n \"\"\"\n if filename.startswith('.'):\n return False\n if filepath in skipfiles:\n return False\n for ext in skipexts:\n if filename.endswith(ext):\n return False\n return True\n"
] |
# -*- coding: utf-8 -*-
"""List interesting files.
"""
from __future__ import print_function
import argparse
import os
from hashlib import md5
from .path import Path
SKIPFILE_NAME = '.skipfile'
def read_skipfile(dirname='.', defaults=None):
"""The .skipfile should contain one entry per line,
listing files/directories that should be skipped by
:func:`list_files`.
"""
if defaults is None:
defaults = ['Makefile', 'make.bat', 'atlassian-ide-plugin.xml']
try:
return defaults + open(
os.path.join(dirname, SKIPFILE_NAME)
).read().splitlines()
except IOError:
return defaults
def main(): # pragma: nocover
"""Print checksum and file name for all files in the directory.
"""
p = argparse.ArgumentParser(add_help="Recursively list interesting files.")
p.add_argument(
'directory', nargs="?", default="",
help="The directory to process (current dir if omitted)."
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="Increase verbosity."
)
args = p.parse_args()
args.curdir = os.getcwd()
if not args.directory:
args.direcotry = args.curdir
if args.verbose:
print(args)
for chsm, fname in list_files(args.directory):
print(chsm, fname)
if __name__ == "__main__": # pragma: nocover
main()
|
datakortet/dkfileutils
|
dkfileutils/listfiles.py
|
main
|
python
|
def main(): # pragma: nocover
p = argparse.ArgumentParser(add_help="Recursively list interesting files.")
p.add_argument(
'directory', nargs="?", default="",
help="The directory to process (current dir if omitted)."
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="Increase verbosity."
)
args = p.parse_args()
args.curdir = os.getcwd()
if not args.directory:
args.direcotry = args.curdir
if args.verbose:
print(args)
for chsm, fname in list_files(args.directory):
print(chsm, fname)
|
Print checksum and file name for all files in the directory.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/listfiles.py#L85-L106
|
[
"def list_files(dirname='.', digest=True):\n \"\"\"Yield (digest, fname) tuples for all interesting files\n in `dirname`.\n \"\"\"\n skipdirs = ['__pycache__', '.git', '.svn', 'htmlcov', 'dist', 'build',\n '.idea', 'tasks', 'static', 'media', 'data', 'migrations',\n '.doctrees', '_static', 'node_modules', 'external',\n 'jobs', 'tryout', 'tmp', '_coverage',\n ]\n skipexts = ['.pyc', '~', '.svg', '.txt', '.TTF', '.tmp', '.errmail',\n '.email', '.bat', '.dll', '.exe', '.Dll', '.jpg', '.gif',\n '.png', '.ico', '.db', '.md5']\n dirname = str(dirname)\n skipfiles = read_skipfile(dirname)\n\n def clean_dirs(directories):\n \"\"\"Remove directories that should be skipped.\n \"\"\"\n for d in directories:\n if d.endswith('.egg-info'):\n directories.remove(d)\n for d in skipdirs:\n if d in directories:\n directories.remove(d)\n\n def keep_file(filename, filepath):\n \"\"\"Returns False if the file should be skipped.\n \"\"\"\n if filename.startswith('.'):\n return False\n if filepath in skipfiles:\n return False\n for ext in skipexts:\n if filename.endswith(ext):\n return False\n return True\n\n for root, dirs, files in os.walk(os.path.abspath(dirname)):\n clean_dirs(dirs)\n for fname in files:\n relpth = os.path.relpath(\n os.path.join(root, fname),\n dirname\n ).replace('\\\\', '/')\n\n parts = Path(relpth).parts()\n if not keep_file(fname, relpth) or \\\n any(p.startswith('.') for p in parts):\n continue\n\n pth = os.path.join(dirname, relpth)\n if digest:\n yield md5(open(pth, 'rb').read()).hexdigest(), relpth\n else:\n yield relpth\n"
] |
# -*- coding: utf-8 -*-
"""List interesting files.
"""
from __future__ import print_function
import argparse
import os
from hashlib import md5
from .path import Path
SKIPFILE_NAME = '.skipfile'
def read_skipfile(dirname='.', defaults=None):
"""The .skipfile should contain one entry per line,
listing files/directories that should be skipped by
:func:`list_files`.
"""
if defaults is None:
defaults = ['Makefile', 'make.bat', 'atlassian-ide-plugin.xml']
try:
return defaults + open(
os.path.join(dirname, SKIPFILE_NAME)
).read().splitlines()
except IOError:
return defaults
def list_files(dirname='.', digest=True):
"""Yield (digest, fname) tuples for all interesting files
in `dirname`.
"""
skipdirs = ['__pycache__', '.git', '.svn', 'htmlcov', 'dist', 'build',
'.idea', 'tasks', 'static', 'media', 'data', 'migrations',
'.doctrees', '_static', 'node_modules', 'external',
'jobs', 'tryout', 'tmp', '_coverage',
]
skipexts = ['.pyc', '~', '.svg', '.txt', '.TTF', '.tmp', '.errmail',
'.email', '.bat', '.dll', '.exe', '.Dll', '.jpg', '.gif',
'.png', '.ico', '.db', '.md5']
dirname = str(dirname)
skipfiles = read_skipfile(dirname)
def clean_dirs(directories):
"""Remove directories that should be skipped.
"""
for d in directories:
if d.endswith('.egg-info'):
directories.remove(d)
for d in skipdirs:
if d in directories:
directories.remove(d)
def keep_file(filename, filepath):
"""Returns False if the file should be skipped.
"""
if filename.startswith('.'):
return False
if filepath in skipfiles:
return False
for ext in skipexts:
if filename.endswith(ext):
return False
return True
for root, dirs, files in os.walk(os.path.abspath(dirname)):
clean_dirs(dirs)
for fname in files:
relpth = os.path.relpath(
os.path.join(root, fname),
dirname
).replace('\\', '/')
parts = Path(relpth).parts()
if not keep_file(fname, relpth) or \
any(p.startswith('.') for p in parts):
continue
pth = os.path.join(dirname, relpth)
if digest:
yield md5(open(pth, 'rb').read()).hexdigest(), relpth
else:
yield relpth
if __name__ == "__main__": # pragma: nocover
main()
|
datakortet/dkfileutils
|
dkfileutils/which.py
|
get_path_directories
|
python
|
def get_path_directories():
pth = os.environ['PATH']
if sys.platform == 'win32' and os.environ.get("BASH"):
# winbash has a bug..
if pth[1] == ';': # pragma: nocover
pth = pth.replace(';', ':', 1)
return [p.strip() for p in pth.split(os.pathsep) if p.strip()]
|
Return a list of all the directories on the path.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/which.py#L18-L26
| null |
# -*- coding: utf-8 -*-
"""Print where on the path an executable is located.
"""
from __future__ import print_function
import sys
import os
from stat import ST_MODE, S_IXUSR, S_IXGRP, S_IXOTH
def get_executable(name):
"""Return the first executable on the path that matches `name`.
"""
for result in which(name):
return result
return None
def is_executable(fname):
"""Check if a file is executable.
"""
return os.stat(fname)[ST_MODE] & (S_IXUSR | S_IXGRP | S_IXOTH)
def _listdir(pth, extensions):
"""Non-raising listdir."""
try:
return [fname for fname in os.listdir(pth)
if os.path.splitext(fname)[1] in extensions]
except OSError: # pragma: nocover
pass
def _normalize(pth):
return os.path.normcase(os.path.normpath(pth))
def which(filename, interactive=False, verbose=False):
"""Yield all executable files on path that matches `filename`.
"""
exe = [e.lower() for e in os.environ.get('PATHEXT', '').split(';')]
if sys.platform != 'win32': # pragma: nocover
exe.append('')
name, ext = os.path.splitext(filename)
has_extension = bool(ext)
if has_extension and ext.lower() not in exe:
raise ValueError("which can only search for executable files")
def match(filenames):
"""Returns the sorted subset of ``filenames`` that matches ``filename``.
"""
res = set()
for fname in filenames:
if fname == filename: # pragma: nocover
res.add(fname) # exact match
continue
fname_name, fname_ext = os.path.splitext(fname)
if fname_name == name and fname_ext.lower() in exe: # pragma: nocover
res.add(fname)
return sorted(res)
returnset = set()
found = False
for pth in get_path_directories():
if verbose: # pragma: nocover
print('checking pth..')
fnames = _listdir(pth, exe)
if not fnames:
continue
for m in match(fnames):
found_file = _normalize(os.path.join(pth, m))
if found_file not in returnset: # pragma: nocover
if is_executable(found_file):
yield found_file
returnset.add(found_file)
found = True
if not found and interactive: # pragma: nocover
print("Couldn't find %r anywhere on the path.." % filename)
sys.exit(1)
if __name__ == "__main__": # pragma: nocover
_args = sys.argv
for _fname in which(_args[1], interactive=True, verbose='-v' in _args):
print(_fname)
sys.exit(0)
|
datakortet/dkfileutils
|
dkfileutils/which.py
|
_listdir
|
python
|
def _listdir(pth, extensions):
try:
return [fname for fname in os.listdir(pth)
if os.path.splitext(fname)[1] in extensions]
except OSError: # pragma: nocover
pass
|
Non-raising listdir.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/which.py#L35-L41
| null |
# -*- coding: utf-8 -*-
"""Print where on the path an executable is located.
"""
from __future__ import print_function
import sys
import os
from stat import ST_MODE, S_IXUSR, S_IXGRP, S_IXOTH
def get_executable(name):
"""Return the first executable on the path that matches `name`.
"""
for result in which(name):
return result
return None
def get_path_directories():
"""Return a list of all the directories on the path.
"""
pth = os.environ['PATH']
if sys.platform == 'win32' and os.environ.get("BASH"):
# winbash has a bug..
if pth[1] == ';': # pragma: nocover
pth = pth.replace(';', ':', 1)
return [p.strip() for p in pth.split(os.pathsep) if p.strip()]
def is_executable(fname):
"""Check if a file is executable.
"""
return os.stat(fname)[ST_MODE] & (S_IXUSR | S_IXGRP | S_IXOTH)
def _normalize(pth):
return os.path.normcase(os.path.normpath(pth))
def which(filename, interactive=False, verbose=False):
"""Yield all executable files on path that matches `filename`.
"""
exe = [e.lower() for e in os.environ.get('PATHEXT', '').split(';')]
if sys.platform != 'win32': # pragma: nocover
exe.append('')
name, ext = os.path.splitext(filename)
has_extension = bool(ext)
if has_extension and ext.lower() not in exe:
raise ValueError("which can only search for executable files")
def match(filenames):
"""Returns the sorted subset of ``filenames`` that matches ``filename``.
"""
res = set()
for fname in filenames:
if fname == filename: # pragma: nocover
res.add(fname) # exact match
continue
fname_name, fname_ext = os.path.splitext(fname)
if fname_name == name and fname_ext.lower() in exe: # pragma: nocover
res.add(fname)
return sorted(res)
returnset = set()
found = False
for pth in get_path_directories():
if verbose: # pragma: nocover
print('checking pth..')
fnames = _listdir(pth, exe)
if not fnames:
continue
for m in match(fnames):
found_file = _normalize(os.path.join(pth, m))
if found_file not in returnset: # pragma: nocover
if is_executable(found_file):
yield found_file
returnset.add(found_file)
found = True
if not found and interactive: # pragma: nocover
print("Couldn't find %r anywhere on the path.." % filename)
sys.exit(1)
if __name__ == "__main__": # pragma: nocover
_args = sys.argv
for _fname in which(_args[1], interactive=True, verbose='-v' in _args):
print(_fname)
sys.exit(0)
|
datakortet/dkfileutils
|
dkfileutils/which.py
|
which
|
python
|
def which(filename, interactive=False, verbose=False):
exe = [e.lower() for e in os.environ.get('PATHEXT', '').split(';')]
if sys.platform != 'win32': # pragma: nocover
exe.append('')
name, ext = os.path.splitext(filename)
has_extension = bool(ext)
if has_extension and ext.lower() not in exe:
raise ValueError("which can only search for executable files")
def match(filenames):
"""Returns the sorted subset of ``filenames`` that matches ``filename``.
"""
res = set()
for fname in filenames:
if fname == filename: # pragma: nocover
res.add(fname) # exact match
continue
fname_name, fname_ext = os.path.splitext(fname)
if fname_name == name and fname_ext.lower() in exe: # pragma: nocover
res.add(fname)
return sorted(res)
returnset = set()
found = False
for pth in get_path_directories():
if verbose: # pragma: nocover
print('checking pth..')
fnames = _listdir(pth, exe)
if not fnames:
continue
for m in match(fnames):
found_file = _normalize(os.path.join(pth, m))
if found_file not in returnset: # pragma: nocover
if is_executable(found_file):
yield found_file
returnset.add(found_file)
found = True
if not found and interactive: # pragma: nocover
print("Couldn't find %r anywhere on the path.." % filename)
sys.exit(1)
|
Yield all executable files on path that matches `filename`.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/which.py#L48-L93
|
[
"def _normalize(pth):\n return os.path.normcase(os.path.normpath(pth))\n",
"def _listdir(pth, extensions):\n \"\"\"Non-raising listdir.\"\"\"\n try:\n return [fname for fname in os.listdir(pth)\n if os.path.splitext(fname)[1] in extensions]\n except OSError: # pragma: nocover\n pass\n",
"def is_executable(fname):\n \"\"\"Check if a file is executable.\n \"\"\"\n return os.stat(fname)[ST_MODE] & (S_IXUSR | S_IXGRP | S_IXOTH)\n",
"def get_path_directories():\n \"\"\"Return a list of all the directories on the path.\n \"\"\"\n pth = os.environ['PATH']\n if sys.platform == 'win32' and os.environ.get(\"BASH\"):\n # winbash has a bug..\n if pth[1] == ';': # pragma: nocover\n pth = pth.replace(';', ':', 1)\n return [p.strip() for p in pth.split(os.pathsep) if p.strip()]\n",
"def match(filenames):\n \"\"\"Returns the sorted subset of ``filenames`` that matches ``filename``.\n \"\"\"\n res = set()\n for fname in filenames:\n if fname == filename: # pragma: nocover\n res.add(fname) # exact match\n continue\n fname_name, fname_ext = os.path.splitext(fname)\n if fname_name == name and fname_ext.lower() in exe: # pragma: nocover\n res.add(fname)\n return sorted(res)\n"
] |
# -*- coding: utf-8 -*-
"""Print where on the path an executable is located.
"""
from __future__ import print_function
import sys
import os
from stat import ST_MODE, S_IXUSR, S_IXGRP, S_IXOTH
def get_executable(name):
"""Return the first executable on the path that matches `name`.
"""
for result in which(name):
return result
return None
def get_path_directories():
"""Return a list of all the directories on the path.
"""
pth = os.environ['PATH']
if sys.platform == 'win32' and os.environ.get("BASH"):
# winbash has a bug..
if pth[1] == ';': # pragma: nocover
pth = pth.replace(';', ':', 1)
return [p.strip() for p in pth.split(os.pathsep) if p.strip()]
def is_executable(fname):
"""Check if a file is executable.
"""
return os.stat(fname)[ST_MODE] & (S_IXUSR | S_IXGRP | S_IXOTH)
def _listdir(pth, extensions):
"""Non-raising listdir."""
try:
return [fname for fname in os.listdir(pth)
if os.path.splitext(fname)[1] in extensions]
except OSError: # pragma: nocover
pass
def _normalize(pth):
return os.path.normcase(os.path.normpath(pth))
if __name__ == "__main__": # pragma: nocover
_args = sys.argv
for _fname in which(_args[1], interactive=True, verbose='-v' in _args):
print(_fname)
sys.exit(0)
|
datakortet/dkfileutils
|
tasks.py
|
build_js
|
python
|
def build_js(ctx, force=False):
for fname in JSX_FILENAMES:
jstools.babel(
ctx,
'{pkg.source_js}/' + fname,
'{pkg.django_static}/{pkg.name}/js/' + fname + '.js',
force=force
)
|
Build all javascript files.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/tasks.py#L79-L88
| null |
# -*- coding: utf-8 -*-
"""
Base version of package/tasks.py, created by
package/root/dir> dk-tasklib install
(it should reside in the root directory of your package)
This file defines tasks for the Invoke tool: http://www.pyinvoke.org
Basic usage::
inv -l # list all available tasks
inv build -f # build everything, forcefully
inv build --docs # only build the docs
dk-tasklib is a library of basic tasks that tries to automate common tasks.
dk-tasklib will attempt to install any tools/libraries/etc. that are required,
e.g. when running the task to compile x.less to x.css, it will check that
the lessc compiler is installed (and if not it will attempt to install it).
This file is an initial skeleton, you are supposed to edit and add to it so it
will fit your use case.
"""
# pragma: nocover
from __future__ import print_function
import os
import warnings
from dkfileutils.changed import changed
from dkfileutils.path import Path
from dktasklib.wintask import task
from invoke import Collection
from dktasklib import docs as doctools
from dktasklib import jstools
from dktasklib import lessc
from dktasklib import version, upversion
from dktasklib.manage import collectstatic
from dktasklib.package import Package, package
from dktasklib.watch import Watcher
from dktasklib.publish import publish
#: where tasks.py is located (root of package)
DIRNAME = Path(os.path.dirname(__file__))
# collectstatic
# --------------
# Specify which settings file should be used when running
# `python manage.py collectstatic` (must be on the path or package root
# directory).
DJANGO_SETTINGS_MODULE = ''
# .less
# ------
# there should be a mypkg/mypkg/less/mypkg.less file that imports any other
# needed sources
# .jsx (es6 source)
# ------------------
# list any .jsx files here. Only filename.jsx (don't include the path).
# The files should reside in mypkg/mypkg/js/ directory.
JSX_FILENAMES = []
# ============================================================================
# autodoc is in a separate process, so can't use settings.configure().
HAVE_SETTINGS = bool(DJANGO_SETTINGS_MODULE)
if not HAVE_SETTINGS and (DIRNAME / 'settings.py').exists():
# look for a dummy settings.py module in the root of the package.
DJANGO_SETTINGS_MODULE = 'settings'
if DJANGO_SETTINGS_MODULE:
os.environ['DJANGO_SETTINGS_MODULE'] = DJANGO_SETTINGS_MODULE
WARN_ABOUT_SETTINGS = not bool(DJANGO_SETTINGS_MODULE)
@task
@task
def build(ctx, less=False, docs=False, js=False, force=False):
"""Build everything and collectstatic.
"""
specified = any([less, docs, js])
buildall = not specified
if buildall or less:
less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
if less_fname.exists():
lessc.LessRule(
ctx,
src='{pkg.source_less}/{pkg.name}.less',
dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
force=force
)
elif less:
print("WARNING: build --less specified, but no file at:", less_fname)
if buildall or docs:
if WARN_ABOUT_SETTINGS:
warnings.warn(
"autodoc might need a dummy settings file in the root of "
"your package. Since it runs in a separate process you cannot"
"use settings.configure()"
)
doctools.build(ctx, force=force)
if buildall or js:
build_js(ctx, force)
if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
collectstatic(ctx, DJANGO_SETTINGS_MODULE)
@task
def watch(ctx):
"""Automatically run build whenever a relevant file changes.
"""
watcher = Watcher(ctx)
watcher.watch_directory(
path='{pkg.source_less}', ext='.less',
action=lambda e: build(ctx, less=True)
)
watcher.watch_directory(
path='{pkg.source_js}', ext='.jsx',
action=lambda e: build(ctx, js=True)
)
watcher.watch_directory(
path='{pkg.docs}', ext='.rst',
action=lambda e: build(ctx, docs=True)
)
watcher.start()
# individual tasks that can be run from this project
ns = Collection(
build,
watch,
build_js,
lessc,
doctools,
version, upversion,
package,
collectstatic,
publish,
)
ns.configure({
'pkg': Package(),
'run': {
'echo': True
}
})
|
datakortet/dkfileutils
|
tasks.py
|
build
|
python
|
def build(ctx, less=False, docs=False, js=False, force=False):
specified = any([less, docs, js])
buildall = not specified
if buildall or less:
less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
if less_fname.exists():
lessc.LessRule(
ctx,
src='{pkg.source_less}/{pkg.name}.less',
dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
force=force
)
elif less:
print("WARNING: build --less specified, but no file at:", less_fname)
if buildall or docs:
if WARN_ABOUT_SETTINGS:
warnings.warn(
"autodoc might need a dummy settings file in the root of "
"your package. Since it runs in a separate process you cannot"
"use settings.configure()"
)
doctools.build(ctx, force=force)
if buildall or js:
build_js(ctx, force)
if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
collectstatic(ctx, DJANGO_SETTINGS_MODULE)
|
Build everything and collectstatic.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/tasks.py#L92-L123
| null |
# -*- coding: utf-8 -*-
"""
Base version of package/tasks.py, created by
package/root/dir> dk-tasklib install
(it should reside in the root directory of your package)
This file defines tasks for the Invoke tool: http://www.pyinvoke.org
Basic usage::
inv -l # list all available tasks
inv build -f # build everything, forcefully
inv build --docs # only build the docs
dk-tasklib is a library of basic tasks that tries to automate common tasks.
dk-tasklib will attempt to install any tools/libraries/etc. that are required,
e.g. when running the task to compile x.less to x.css, it will check that
the lessc compiler is installed (and if not it will attempt to install it).
This file is an initial skeleton, you are supposed to edit and add to it so it
will fit your use case.
"""
# pragma: nocover
from __future__ import print_function
import os
import warnings
from dkfileutils.changed import changed
from dkfileutils.path import Path
from dktasklib.wintask import task
from invoke import Collection
from dktasklib import docs as doctools
from dktasklib import jstools
from dktasklib import lessc
from dktasklib import version, upversion
from dktasklib.manage import collectstatic
from dktasklib.package import Package, package
from dktasklib.watch import Watcher
from dktasklib.publish import publish
#: where tasks.py is located (root of package)
DIRNAME = Path(os.path.dirname(__file__))
# collectstatic
# --------------
# Specify which settings file should be used when running
# `python manage.py collectstatic` (must be on the path or package root
# directory).
DJANGO_SETTINGS_MODULE = ''
# .less
# ------
# there should be a mypkg/mypkg/less/mypkg.less file that imports any other
# needed sources
# .jsx (es6 source)
# ------------------
# list any .jsx files here. Only filename.jsx (don't include the path).
# The files should reside in mypkg/mypkg/js/ directory.
JSX_FILENAMES = []
# ============================================================================
# autodoc is in a separate process, so can't use settings.configure().
HAVE_SETTINGS = bool(DJANGO_SETTINGS_MODULE)
if not HAVE_SETTINGS and (DIRNAME / 'settings.py').exists():
# look for a dummy settings.py module in the root of the package.
DJANGO_SETTINGS_MODULE = 'settings'
if DJANGO_SETTINGS_MODULE:
os.environ['DJANGO_SETTINGS_MODULE'] = DJANGO_SETTINGS_MODULE
WARN_ABOUT_SETTINGS = not bool(DJANGO_SETTINGS_MODULE)
@task
def build_js(ctx, force=False):
"""Build all javascript files.
"""
for fname in JSX_FILENAMES:
jstools.babel(
ctx,
'{pkg.source_js}/' + fname,
'{pkg.django_static}/{pkg.name}/js/' + fname + '.js',
force=force
)
@task
@task
def watch(ctx):
"""Automatically run build whenever a relevant file changes.
"""
watcher = Watcher(ctx)
watcher.watch_directory(
path='{pkg.source_less}', ext='.less',
action=lambda e: build(ctx, less=True)
)
watcher.watch_directory(
path='{pkg.source_js}', ext='.jsx',
action=lambda e: build(ctx, js=True)
)
watcher.watch_directory(
path='{pkg.docs}', ext='.rst',
action=lambda e: build(ctx, docs=True)
)
watcher.start()
# individual tasks that can be run from this project
ns = Collection(
build,
watch,
build_js,
lessc,
doctools,
version, upversion,
package,
collectstatic,
publish,
)
ns.configure({
'pkg': Package(),
'run': {
'echo': True
}
})
|
datakortet/dkfileutils
|
tasks.py
|
watch
|
python
|
def watch(ctx):
watcher = Watcher(ctx)
watcher.watch_directory(
path='{pkg.source_less}', ext='.less',
action=lambda e: build(ctx, less=True)
)
watcher.watch_directory(
path='{pkg.source_js}', ext='.jsx',
action=lambda e: build(ctx, js=True)
)
watcher.watch_directory(
path='{pkg.docs}', ext='.rst',
action=lambda e: build(ctx, docs=True)
)
watcher.start()
|
Automatically run build whenever a relevant file changes.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/tasks.py#L127-L143
| null |
# -*- coding: utf-8 -*-
"""
Base version of package/tasks.py, created by
package/root/dir> dk-tasklib install
(it should reside in the root directory of your package)
This file defines tasks for the Invoke tool: http://www.pyinvoke.org
Basic usage::
inv -l # list all available tasks
inv build -f # build everything, forcefully
inv build --docs # only build the docs
dk-tasklib is a library of basic tasks that tries to automate common tasks.
dk-tasklib will attempt to install any tools/libraries/etc. that are required,
e.g. when running the task to compile x.less to x.css, it will check that
the lessc compiler is installed (and if not it will attempt to install it).
This file is an initial skeleton, you are supposed to edit and add to it so it
will fit your use case.
"""
# pragma: nocover
from __future__ import print_function
import os
import warnings
from dkfileutils.changed import changed
from dkfileutils.path import Path
from dktasklib.wintask import task
from invoke import Collection
from dktasklib import docs as doctools
from dktasklib import jstools
from dktasklib import lessc
from dktasklib import version, upversion
from dktasklib.manage import collectstatic
from dktasklib.package import Package, package
from dktasklib.watch import Watcher
from dktasklib.publish import publish
#: where tasks.py is located (root of package)
DIRNAME = Path(os.path.dirname(__file__))
# collectstatic
# --------------
# Specify which settings file should be used when running
# `python manage.py collectstatic` (must be on the path or package root
# directory).
DJANGO_SETTINGS_MODULE = ''
# .less
# ------
# there should be a mypkg/mypkg/less/mypkg.less file that imports any other
# needed sources
# .jsx (es6 source)
# ------------------
# list any .jsx files here. Only filename.jsx (don't include the path).
# The files should reside in mypkg/mypkg/js/ directory.
JSX_FILENAMES = []
# ============================================================================
# autodoc is in a separate process, so can't use settings.configure().
HAVE_SETTINGS = bool(DJANGO_SETTINGS_MODULE)
if not HAVE_SETTINGS and (DIRNAME / 'settings.py').exists():
# look for a dummy settings.py module in the root of the package.
DJANGO_SETTINGS_MODULE = 'settings'
if DJANGO_SETTINGS_MODULE:
os.environ['DJANGO_SETTINGS_MODULE'] = DJANGO_SETTINGS_MODULE
WARN_ABOUT_SETTINGS = not bool(DJANGO_SETTINGS_MODULE)
@task
def build_js(ctx, force=False):
"""Build all javascript files.
"""
for fname in JSX_FILENAMES:
jstools.babel(
ctx,
'{pkg.source_js}/' + fname,
'{pkg.django_static}/{pkg.name}/js/' + fname + '.js',
force=force
)
@task
def build(ctx, less=False, docs=False, js=False, force=False):
"""Build everything and collectstatic.
"""
specified = any([less, docs, js])
buildall = not specified
if buildall or less:
less_fname = ctx.pkg.source_less / ctx.pkg.name + '.less'
if less_fname.exists():
lessc.LessRule(
ctx,
src='{pkg.source_less}/{pkg.name}.less',
dst='{pkg.django_static}/{pkg.name}/css/{pkg.name}-{version}.min.css',
force=force
)
elif less:
print("WARNING: build --less specified, but no file at:", less_fname)
if buildall or docs:
if WARN_ABOUT_SETTINGS:
warnings.warn(
"autodoc might need a dummy settings file in the root of "
"your package. Since it runs in a separate process you cannot"
"use settings.configure()"
)
doctools.build(ctx, force=force)
if buildall or js:
build_js(ctx, force)
if HAVE_SETTINGS and (force or changed(ctx.pkg.django_static)):
collectstatic(ctx, DJANGO_SETTINGS_MODULE)
@task
# individual tasks that can be run from this project
ns = Collection(
build,
watch,
build_js,
lessc,
doctools,
version, upversion,
package,
collectstatic,
publish,
)
ns.configure({
'pkg': Package(),
'run': {
'echo': True
}
})
|
datakortet/dkfileutils
|
dkfileutils/changed.py
|
digest
|
python
|
def digest(dirname, glob=None):
md5 = hashlib.md5()
if glob is None:
fnames = [fname for _, fname in list_files(Path(dirname))]
for fname in sorted(fnames):
fname = os.path.join(dirname, fname)
md5.update(open(fname, 'rb').read())
else:
fnames = Path(dirname).glob(glob)
for fname in sorted(fnames):
md5.update(fname.open('rb').read())
return md5.hexdigest()
|
Returns the md5 digest of all interesting files (or glob) in `dirname`.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/changed.py#L12-L25
|
[
"def list_files(dirname='.', digest=True):\n \"\"\"Yield (digest, fname) tuples for all interesting files\n in `dirname`.\n \"\"\"\n skipdirs = ['__pycache__', '.git', '.svn', 'htmlcov', 'dist', 'build',\n '.idea', 'tasks', 'static', 'media', 'data', 'migrations',\n '.doctrees', '_static', 'node_modules', 'external',\n 'jobs', 'tryout', 'tmp', '_coverage',\n ]\n skipexts = ['.pyc', '~', '.svg', '.txt', '.TTF', '.tmp', '.errmail',\n '.email', '.bat', '.dll', '.exe', '.Dll', '.jpg', '.gif',\n '.png', '.ico', '.db', '.md5']\n dirname = str(dirname)\n skipfiles = read_skipfile(dirname)\n\n def clean_dirs(directories):\n \"\"\"Remove directories that should be skipped.\n \"\"\"\n for d in directories:\n if d.endswith('.egg-info'):\n directories.remove(d)\n for d in skipdirs:\n if d in directories:\n directories.remove(d)\n\n def keep_file(filename, filepath):\n \"\"\"Returns False if the file should be skipped.\n \"\"\"\n if filename.startswith('.'):\n return False\n if filepath in skipfiles:\n return False\n for ext in skipexts:\n if filename.endswith(ext):\n return False\n return True\n\n for root, dirs, files in os.walk(os.path.abspath(dirname)):\n clean_dirs(dirs)\n for fname in files:\n relpth = os.path.relpath(\n os.path.join(root, fname),\n dirname\n ).replace('\\\\', '/')\n\n parts = Path(relpth).parts()\n if not keep_file(fname, relpth) or \\\n any(p.startswith('.') for p in parts):\n continue\n\n pth = os.path.join(dirname, relpth)\n if digest:\n yield md5(open(pth, 'rb').read()).hexdigest(), relpth\n else:\n yield relpth\n"
] |
# -*- coding: utf-8 -*-
"""Check if contents of directory has changed.
"""
from __future__ import print_function
import argparse
import os
import hashlib
from .listfiles import list_files
from .path import Path
def changed(dirname, filename='.md5', args=None, glob=None):
"""Has `glob` changed in `dirname`
Args:
dirname: directory to measure
filename: filename to store checksum
"""
root = Path(dirname)
if not root.exists():
# if dirname doesn't exist it is changed (by definition)
return True
cachefile = root / filename
current_digest = cachefile.open().read() if cachefile.exists() else ""
_digest = digest(dirname, glob=glob)
if args and args.verbose: # pragma: nocover
print("md5:", _digest)
has_changed = current_digest != _digest
if has_changed:
with open(os.path.join(dirname, filename), 'w') as fp:
fp.write(_digest)
return has_changed
class Directory(Path):
"""A path that is a directory.
"""
def changed(self, filename='.md5', glob=None):
"""Are any of the files matched by ``glob`` changed?
"""
if glob is not None:
filename += '.glob-' + ''.join(ch.lower()
for ch in glob if ch.isalpha())
return changed(self, filename, glob=glob)
def main(): # pragma: nocover
"""Return exit code of zero iff directory is not changed.
"""
p = argparse.ArgumentParser()
p.add_argument(
'directory',
help="Directory to check"
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="increase verbosity"
)
args = p.parse_args()
import sys
_changed = changed(sys.argv[1], args=args)
sys.exit(_changed)
if __name__ == "__main__": # pragma: nocover
main()
|
datakortet/dkfileutils
|
dkfileutils/changed.py
|
changed
|
python
|
def changed(dirname, filename='.md5', args=None, glob=None):
root = Path(dirname)
if not root.exists():
# if dirname doesn't exist it is changed (by definition)
return True
cachefile = root / filename
current_digest = cachefile.open().read() if cachefile.exists() else ""
_digest = digest(dirname, glob=glob)
if args and args.verbose: # pragma: nocover
print("md5:", _digest)
has_changed = current_digest != _digest
if has_changed:
with open(os.path.join(dirname, filename), 'w') as fp:
fp.write(_digest)
return has_changed
|
Has `glob` changed in `dirname`
Args:
dirname: directory to measure
filename: filename to store checksum
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/changed.py#L28-L52
|
[
"def digest(dirname, glob=None):\n \"\"\"Returns the md5 digest of all interesting files (or glob) in `dirname`.\n \"\"\"\n md5 = hashlib.md5()\n if glob is None:\n fnames = [fname for _, fname in list_files(Path(dirname))]\n for fname in sorted(fnames):\n fname = os.path.join(dirname, fname)\n md5.update(open(fname, 'rb').read())\n else:\n fnames = Path(dirname).glob(glob)\n for fname in sorted(fnames):\n md5.update(fname.open('rb').read())\n return md5.hexdigest()\n"
] |
# -*- coding: utf-8 -*-
"""Check if contents of directory has changed.
"""
from __future__ import print_function
import argparse
import os
import hashlib
from .listfiles import list_files
from .path import Path
def digest(dirname, glob=None):
"""Returns the md5 digest of all interesting files (or glob) in `dirname`.
"""
md5 = hashlib.md5()
if glob is None:
fnames = [fname for _, fname in list_files(Path(dirname))]
for fname in sorted(fnames):
fname = os.path.join(dirname, fname)
md5.update(open(fname, 'rb').read())
else:
fnames = Path(dirname).glob(glob)
for fname in sorted(fnames):
md5.update(fname.open('rb').read())
return md5.hexdigest()
def changed(dirname, filename='.md5', args=None, glob=None):
"""Has `glob` changed in `dirname`
Args:
dirname: directory to measure
filename: filename to store checksum
"""
root = Path(dirname)
if not root.exists():
# if dirname doesn't exist it is changed (by definition)
return True
cachefile = root / filename
current_digest = cachefile.open().read() if cachefile.exists() else ""
_digest = digest(dirname, glob=glob)
if args and args.verbose: # pragma: nocover
print("md5:", _digest)
has_changed = current_digest != _digest
if has_changed:
with open(os.path.join(dirname, filename), 'w') as fp:
fp.write(_digest)
return has_changed
class Directory(Path):
"""A path that is a directory.
"""
def changed(self, filename='.md5', glob=None):
"""Are any of the files matched by ``glob`` changed?
"""
if glob is not None:
filename += '.glob-' + ''.join(ch.lower()
for ch in glob if ch.isalpha())
return changed(self, filename, glob=glob)
def main(): # pragma: nocover
"""Return exit code of zero iff directory is not changed.
"""
p = argparse.ArgumentParser()
p.add_argument(
'directory',
help="Directory to check"
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="increase verbosity"
)
args = p.parse_args()
import sys
_changed = changed(sys.argv[1], args=args)
sys.exit(_changed)
if __name__ == "__main__": # pragma: nocover
main()
|
datakortet/dkfileutils
|
dkfileutils/changed.py
|
main
|
python
|
def main(): # pragma: nocover
p = argparse.ArgumentParser()
p.add_argument(
'directory',
help="Directory to check"
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="increase verbosity"
)
args = p.parse_args()
import sys
_changed = changed(sys.argv[1], args=args)
sys.exit(_changed)
|
Return exit code of zero iff directory is not changed.
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/changed.py#L67-L83
|
[
"def changed(dirname, filename='.md5', args=None, glob=None):\n \"\"\"Has `glob` changed in `dirname`\n\n Args:\n dirname: directory to measure\n filename: filename to store checksum\n \"\"\"\n root = Path(dirname)\n if not root.exists():\n # if dirname doesn't exist it is changed (by definition)\n return True\n\n cachefile = root / filename\n current_digest = cachefile.open().read() if cachefile.exists() else \"\"\n\n _digest = digest(dirname, glob=glob)\n if args and args.verbose: # pragma: nocover\n print(\"md5:\", _digest)\n has_changed = current_digest != _digest\n\n if has_changed:\n with open(os.path.join(dirname, filename), 'w') as fp:\n fp.write(_digest)\n\n return has_changed\n"
] |
# -*- coding: utf-8 -*-
"""Check if contents of directory has changed.
"""
from __future__ import print_function
import argparse
import os
import hashlib
from .listfiles import list_files
from .path import Path
def digest(dirname, glob=None):
"""Returns the md5 digest of all interesting files (or glob) in `dirname`.
"""
md5 = hashlib.md5()
if glob is None:
fnames = [fname for _, fname in list_files(Path(dirname))]
for fname in sorted(fnames):
fname = os.path.join(dirname, fname)
md5.update(open(fname, 'rb').read())
else:
fnames = Path(dirname).glob(glob)
for fname in sorted(fnames):
md5.update(fname.open('rb').read())
return md5.hexdigest()
def changed(dirname, filename='.md5', args=None, glob=None):
"""Has `glob` changed in `dirname`
Args:
dirname: directory to measure
filename: filename to store checksum
"""
root = Path(dirname)
if not root.exists():
# if dirname doesn't exist it is changed (by definition)
return True
cachefile = root / filename
current_digest = cachefile.open().read() if cachefile.exists() else ""
_digest = digest(dirname, glob=glob)
if args and args.verbose: # pragma: nocover
print("md5:", _digest)
has_changed = current_digest != _digest
if has_changed:
with open(os.path.join(dirname, filename), 'w') as fp:
fp.write(_digest)
return has_changed
class Directory(Path):
"""A path that is a directory.
"""
def changed(self, filename='.md5', glob=None):
"""Are any of the files matched by ``glob`` changed?
"""
if glob is not None:
filename += '.glob-' + ''.join(ch.lower()
for ch in glob if ch.isalpha())
return changed(self, filename, glob=glob)
if __name__ == "__main__": # pragma: nocover
main()
|
datakortet/dkfileutils
|
dkfileutils/changed.py
|
Directory.changed
|
python
|
def changed(self, filename='.md5', glob=None):
if glob is not None:
filename += '.glob-' + ''.join(ch.lower()
for ch in glob if ch.isalpha())
return changed(self, filename, glob=glob)
|
Are any of the files matched by ``glob`` changed?
|
train
|
https://github.com/datakortet/dkfileutils/blob/924098d6e2edf88ad9b3ffdec9c74530f80a7d77/dkfileutils/changed.py#L58-L64
|
[
"def changed(dirname, filename='.md5', args=None, glob=None):\n \"\"\"Has `glob` changed in `dirname`\n\n Args:\n dirname: directory to measure\n filename: filename to store checksum\n \"\"\"\n root = Path(dirname)\n if not root.exists():\n # if dirname doesn't exist it is changed (by definition)\n return True\n\n cachefile = root / filename\n current_digest = cachefile.open().read() if cachefile.exists() else \"\"\n\n _digest = digest(dirname, glob=glob)\n if args and args.verbose: # pragma: nocover\n print(\"md5:\", _digest)\n has_changed = current_digest != _digest\n\n if has_changed:\n with open(os.path.join(dirname, filename), 'w') as fp:\n fp.write(_digest)\n\n return has_changed\n"
] |
class Directory(Path):
"""A path that is a directory.
"""
|
gear11/pypelogs
|
pypein/wikip.py
|
geo_filter
|
python
|
def geo_filter(d):
page = d["page"]
if not "revision" in page:
return None
title = page["title"]
if skip_article(title):
LOG.info("Skipping low-value article %s", title)
return None
text = page["revision"]["text"]
if not utils.is_str_type(text):
if "#text" in text:
text = text["#text"]
else:
return None
LOG.debug("--------------------------------------------------------------")
LOG.debug(title)
LOG.debug("--------------------------------------------------------------")
LOG.debug(text)
c = find_geo_coords(text)
u = wikip_url(title)
"""
m = hashlib.md5()
m.update(u.encode("UTF-8") if hasattr(u, 'encode') else u)
i = base64.urlsafe_b64encode(m.digest()).replace('=', '')
"""
return {
#"id": i,
"title": title,
"url": u,
"coords": c,
"updated": page["revision"].get("timestamp")
} if c else None
|
Inspects the given Wikipedia article dict for geo-coordinates.
If no coordinates are found, returns None. Otherwise, returns a new dict
with the title and URL of the original article, along with coordinates.
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/wikip.py#L76-L111
|
[
"def wikip_url(s):\n return 'http://wikipedia.org/wiki/'+s.replace(' ', '_')\n",
"def skip_article(title):\n \"\"\"Skips articles that have no value\"\"\"\n if title.find(\"Wikipedia:WikiProject National Register of Historic Places/\") == 0:\n return True\n return False\n",
"def find_geo_coords(s):\n \"\"\"Returns a list of lat/lons found by scanning the given text\"\"\"\n coords = []\n LOG.debug(\"Matching in text size %s\", len(s))\n for c in INFO_BOX_LAT_LON.findall(s):\n try:\n coord = (float(c[1]), float(c[2])) #, c[0])\n coords.append(coord)\n LOG.debug(\"Found info box lat/lon: %s\", coord)\n except Exception as ex:\n LOG.warn(\"Bad parse of info box %s: %s\", c, ex)\n for c in COORDS_GEN.findall(s):\n # Special cases\n if skip_coords(c):\n LOG.debug(\"Ignorning coords %s\", c)\n continue\n m = COORDS_GROUPS.search(c)\n if not m:\n LOG.warn(\"Unrecognized coord format: %s\", c)\n continue\n try:\n # Remove empty optional groups and remove pipes from matches\n g = [(s[0:-1] if s[-1] == '|' else s) for s in list(m.groups()) if s is not None and len(s)]\n #LOG.info(\"Found groups: %s\", g)\n if len(g) == 1: # Single lat|lon\n lat, lon = g[0].split('|')\n coord = (float(lat), float(lon)) #, c)\n coords.append(coord)\n LOG.debug(\"Found lat|lon: %s\", coord)\n elif g[3] == 'E' or g[3] == 'W':\n lat = depipe(g[0]) * (1 if g[1].upper() == 'N' else -1)\n lon = depipe(g[2]) * (1 if g[3].upper() == 'E' else -1)\n coord = (lat, lon) #, c)\n coords.append(coord)\n LOG.debug(\"Found lat|NS|lon|EW: %s\", coord)\n else:\n LOG.warn(\"Unrecognized coord format: %s (parsed %s)\", c, g)\n except Exception as ex:\n LOG.warn(\"Bad parse of %s: %s\", c, ex)\n l = []\n for c in set(coords): # Dedupe; the reality is non-trivial though...we want to keep only the most precise\n if c[0] > 90 or c[0] < -90 or c[1] > 180 or c[1] < -180 or (c[0] == 0 and c[1] == 0):\n LOG.warn(\"Invalid lat or lon: %s\", c)\n else:\n l.append({\"type\": \"Point\", \"coordinates\": (c[1], c[0])}) # GeoJSON, lon goes first\n return l\n"
] |
import g11pyutils as utils
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import XMLParser
import logging
import re
import base64
import hashlib
import codecs
LOG = logging.getLogger("wikip")
class WikipXMLParser(XMLParser):
def init(self, html=0, target=None, encoding=None):
super(WikipXMLParser, self).__init__(html, target, encoding)
def feed(self, data):
#LOG.warn("Feeding %s %s" % (type(%s), data))
# Yes this is awful, I've got to encode it...
#data = codecs.
r = super(WikipXMLParser, self).feed(data)
LOG.warn("Returned %s" % r)
return r
def Parse(self, data, num):
LOG.warn("Hello!!!!")
super(WikipXMLParser, self).Parse(data, num)
class WikipArticles(object):
"""Iterates over a Wikipedia XML article dump, producing one event per article.
The event is a deeply-nested dict matching the article XML and capturing the full article contents.
"""
def __init__(self, article_file=None, filter = None):
self.fo = utils.fopen(article_file, 'b') # 'utf-8')
self.filter = filter
LOG.info("Using ElementTree version %s", ET.VERSION)
def __iter__(self):
# get an iterable
context = ET.iterparse(self.fo, events=("start", "end"))#, parser=XMLParser(encoding="UTF-8"))
ET.register_namespace('', 'http://www.mediawiki.org/xml/export-0.8/')
# turn it into an iterator
context = iter(context)
# get the root element
event, root = next(context)
LOG.info("Root attrib: %s", root.attrib)
for event, el in context:
tag = bare(el.tag)
LOG.debug("Event: %s, El: %s, Tag: '%s'", event, el, tag)
if event == "end" and tag == "page":
d = utils.etree_to_dict(el)
if self.filter:
try:
d = self.filter(d)
if d:
yield d
except Exception as e:
LOG.warn("Exception filtering article: %s", e)
else:
yield d
root.clear() # clear each time to prevent memory growth
class WikipGeo(WikipArticles):
def __init__(self, article_file=None):
super(WikipGeo, self).__init__(article_file, geo_filter)
def wikip_url(s):
return 'http://wikipedia.org/wiki/'+s.replace(' ', '_')
def skip_article(title):
"""Skips articles that have no value"""
if title.find("Wikipedia:WikiProject National Register of Historic Places/") == 0:
return True
return False
def bare(tag):
"""Returns a tag stripped of preceding namespace info"""
n = tag.rfind('}')
return tag[n+1:] if n >= 0 else tag
'''
| latitude = 48.8738
| longitude = 2.2950
'''
INFO_BOX_LAT_LON = re.compile(r"(\|\s*latitude\s*=\s*(-?[\d\.]+)\s*\|\s*longitude\s*=\s*(-?[\d\.]+))", re.MULTILINE )
'''
{{coord|35.0797|-80.7742|region:US-NC_type:edu|display=title}}
{{coord|77|51|S|166|40|E|}}
'''
COORDS_GEN = re.compile(r"(\{\{coord\|[^\}]+\}\})")
#COORDS_GROUPS = re.compile(r"\{\{coord\|(?:display[^\|]+\|)?((?:\s*-?[\d\.]+\s*\|?){1,3})([NS]\|)?((?:\s*-?[\d\.]+\s*\|){0,3})([EW])?")
COORDS_GROUPS = re.compile(r"\{\{coord\|(?:[^\d\|]+\|)*((?:\s*-?[\d\.]+\s*\|?){1,3})([NS]\|)?((?:\s*-?[\d\.]+\s*\|){0,3})([EW])?")
def find_geo_coords(s):
"""Returns a list of lat/lons found by scanning the given text"""
coords = []
LOG.debug("Matching in text size %s", len(s))
for c in INFO_BOX_LAT_LON.findall(s):
try:
coord = (float(c[1]), float(c[2])) #, c[0])
coords.append(coord)
LOG.debug("Found info box lat/lon: %s", coord)
except Exception as ex:
LOG.warn("Bad parse of info box %s: %s", c, ex)
for c in COORDS_GEN.findall(s):
# Special cases
if skip_coords(c):
LOG.debug("Ignorning coords %s", c)
continue
m = COORDS_GROUPS.search(c)
if not m:
LOG.warn("Unrecognized coord format: %s", c)
continue
try:
# Remove empty optional groups and remove pipes from matches
g = [(s[0:-1] if s[-1] == '|' else s) for s in list(m.groups()) if s is not None and len(s)]
#LOG.info("Found groups: %s", g)
if len(g) == 1: # Single lat|lon
lat, lon = g[0].split('|')
coord = (float(lat), float(lon)) #, c)
coords.append(coord)
LOG.debug("Found lat|lon: %s", coord)
elif g[3] == 'E' or g[3] == 'W':
lat = depipe(g[0]) * (1 if g[1].upper() == 'N' else -1)
lon = depipe(g[2]) * (1 if g[3].upper() == 'E' else -1)
coord = (lat, lon) #, c)
coords.append(coord)
LOG.debug("Found lat|NS|lon|EW: %s", coord)
else:
LOG.warn("Unrecognized coord format: %s (parsed %s)", c, g)
except Exception as ex:
LOG.warn("Bad parse of %s: %s", c, ex)
l = []
for c in set(coords): # Dedupe; the reality is non-trivial though...we want to keep only the most precise
if c[0] > 90 or c[0] < -90 or c[1] > 180 or c[1] < -180 or (c[0] == 0 and c[1] == 0):
LOG.warn("Invalid lat or lon: %s", c)
else:
l.append({"type": "Point", "coordinates": (c[1], c[0])}) # GeoJSON, lon goes first
return l
def depipe(s):
"""Convert a string of the form DD or DD|MM or DD|MM|SS to decimal degrees"""
n = 0
for i in reversed(s.split('|')):
n = n / 60.0 + float(i)
return n
def skip_coords(c):
"""Skip coordinate strings that are not valid"""
if c == "{{coord|LAT|LONG|display=inline,title}}": # Unpopulated coord template
return True
if c.find("globe:") >= 0 and c.find("globe:earth") == -1: # Moon, venus, etc.
return True
return False
|
gear11/pypelogs
|
pypein/wikip.py
|
find_geo_coords
|
python
|
def find_geo_coords(s):
coords = []
LOG.debug("Matching in text size %s", len(s))
for c in INFO_BOX_LAT_LON.findall(s):
try:
coord = (float(c[1]), float(c[2])) #, c[0])
coords.append(coord)
LOG.debug("Found info box lat/lon: %s", coord)
except Exception as ex:
LOG.warn("Bad parse of info box %s: %s", c, ex)
for c in COORDS_GEN.findall(s):
# Special cases
if skip_coords(c):
LOG.debug("Ignorning coords %s", c)
continue
m = COORDS_GROUPS.search(c)
if not m:
LOG.warn("Unrecognized coord format: %s", c)
continue
try:
# Remove empty optional groups and remove pipes from matches
g = [(s[0:-1] if s[-1] == '|' else s) for s in list(m.groups()) if s is not None and len(s)]
#LOG.info("Found groups: %s", g)
if len(g) == 1: # Single lat|lon
lat, lon = g[0].split('|')
coord = (float(lat), float(lon)) #, c)
coords.append(coord)
LOG.debug("Found lat|lon: %s", coord)
elif g[3] == 'E' or g[3] == 'W':
lat = depipe(g[0]) * (1 if g[1].upper() == 'N' else -1)
lon = depipe(g[2]) * (1 if g[3].upper() == 'E' else -1)
coord = (lat, lon) #, c)
coords.append(coord)
LOG.debug("Found lat|NS|lon|EW: %s", coord)
else:
LOG.warn("Unrecognized coord format: %s (parsed %s)", c, g)
except Exception as ex:
LOG.warn("Bad parse of %s: %s", c, ex)
l = []
for c in set(coords): # Dedupe; the reality is non-trivial though...we want to keep only the most precise
if c[0] > 90 or c[0] < -90 or c[1] > 180 or c[1] < -180 or (c[0] == 0 and c[1] == 0):
LOG.warn("Invalid lat or lon: %s", c)
else:
l.append({"type": "Point", "coordinates": (c[1], c[0])}) # GeoJSON, lon goes first
return l
|
Returns a list of lat/lons found by scanning the given text
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/wikip.py#L132-L177
|
[
"def skip_coords(c):\n \"\"\"Skip coordinate strings that are not valid\"\"\"\n if c == \"{{coord|LAT|LONG|display=inline,title}}\": # Unpopulated coord template\n return True\n if c.find(\"globe:\") >= 0 and c.find(\"globe:earth\") == -1: # Moon, venus, etc.\n return True\n return False\n",
"def depipe(s):\n \"\"\"Convert a string of the form DD or DD|MM or DD|MM|SS to decimal degrees\"\"\"\n n = 0\n for i in reversed(s.split('|')):\n n = n / 60.0 + float(i)\n return n\n"
] |
import g11pyutils as utils
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import XMLParser
import logging
import re
import base64
import hashlib
import codecs
LOG = logging.getLogger("wikip")
class WikipXMLParser(XMLParser):
def init(self, html=0, target=None, encoding=None):
super(WikipXMLParser, self).__init__(html, target, encoding)
def feed(self, data):
#LOG.warn("Feeding %s %s" % (type(%s), data))
# Yes this is awful, I've got to encode it...
#data = codecs.
r = super(WikipXMLParser, self).feed(data)
LOG.warn("Returned %s" % r)
return r
def Parse(self, data, num):
LOG.warn("Hello!!!!")
super(WikipXMLParser, self).Parse(data, num)
class WikipArticles(object):
"""Iterates over a Wikipedia XML article dump, producing one event per article.
The event is a deeply-nested dict matching the article XML and capturing the full article contents.
"""
def __init__(self, article_file=None, filter = None):
self.fo = utils.fopen(article_file, 'b') # 'utf-8')
self.filter = filter
LOG.info("Using ElementTree version %s", ET.VERSION)
def __iter__(self):
# get an iterable
context = ET.iterparse(self.fo, events=("start", "end"))#, parser=XMLParser(encoding="UTF-8"))
ET.register_namespace('', 'http://www.mediawiki.org/xml/export-0.8/')
# turn it into an iterator
context = iter(context)
# get the root element
event, root = next(context)
LOG.info("Root attrib: %s", root.attrib)
for event, el in context:
tag = bare(el.tag)
LOG.debug("Event: %s, El: %s, Tag: '%s'", event, el, tag)
if event == "end" and tag == "page":
d = utils.etree_to_dict(el)
if self.filter:
try:
d = self.filter(d)
if d:
yield d
except Exception as e:
LOG.warn("Exception filtering article: %s", e)
else:
yield d
root.clear() # clear each time to prevent memory growth
class WikipGeo(WikipArticles):
def __init__(self, article_file=None):
super(WikipGeo, self).__init__(article_file, geo_filter)
def wikip_url(s):
return 'http://wikipedia.org/wiki/'+s.replace(' ', '_')
def skip_article(title):
"""Skips articles that have no value"""
if title.find("Wikipedia:WikiProject National Register of Historic Places/") == 0:
return True
return False
def geo_filter(d):
"""Inspects the given Wikipedia article dict for geo-coordinates.
If no coordinates are found, returns None. Otherwise, returns a new dict
with the title and URL of the original article, along with coordinates."""
page = d["page"]
if not "revision" in page:
return None
title = page["title"]
if skip_article(title):
LOG.info("Skipping low-value article %s", title)
return None
text = page["revision"]["text"]
if not utils.is_str_type(text):
if "#text" in text:
text = text["#text"]
else:
return None
LOG.debug("--------------------------------------------------------------")
LOG.debug(title)
LOG.debug("--------------------------------------------------------------")
LOG.debug(text)
c = find_geo_coords(text)
u = wikip_url(title)
"""
m = hashlib.md5()
m.update(u.encode("UTF-8") if hasattr(u, 'encode') else u)
i = base64.urlsafe_b64encode(m.digest()).replace('=', '')
"""
return {
#"id": i,
"title": title,
"url": u,
"coords": c,
"updated": page["revision"].get("timestamp")
} if c else None
def bare(tag):
"""Returns a tag stripped of preceding namespace info"""
n = tag.rfind('}')
return tag[n+1:] if n >= 0 else tag
'''
| latitude = 48.8738
| longitude = 2.2950
'''
INFO_BOX_LAT_LON = re.compile(r"(\|\s*latitude\s*=\s*(-?[\d\.]+)\s*\|\s*longitude\s*=\s*(-?[\d\.]+))", re.MULTILINE )
'''
{{coord|35.0797|-80.7742|region:US-NC_type:edu|display=title}}
{{coord|77|51|S|166|40|E|}}
'''
COORDS_GEN = re.compile(r"(\{\{coord\|[^\}]+\}\})")
#COORDS_GROUPS = re.compile(r"\{\{coord\|(?:display[^\|]+\|)?((?:\s*-?[\d\.]+\s*\|?){1,3})([NS]\|)?((?:\s*-?[\d\.]+\s*\|){0,3})([EW])?")
COORDS_GROUPS = re.compile(r"\{\{coord\|(?:[^\d\|]+\|)*((?:\s*-?[\d\.]+\s*\|?){1,3})([NS]\|)?((?:\s*-?[\d\.]+\s*\|){0,3})([EW])?")
def depipe(s):
"""Convert a string of the form DD or DD|MM or DD|MM|SS to decimal degrees"""
n = 0
for i in reversed(s.split('|')):
n = n / 60.0 + float(i)
return n
def skip_coords(c):
"""Skip coordinate strings that are not valid"""
if c == "{{coord|LAT|LONG|display=inline,title}}": # Unpopulated coord template
return True
if c.find("globe:") >= 0 and c.find("globe:earth") == -1: # Moon, venus, etc.
return True
return False
|
gear11/pypelogs
|
pypein/wikip.py
|
depipe
|
python
|
def depipe(s):
n = 0
for i in reversed(s.split('|')):
n = n / 60.0 + float(i)
return n
|
Convert a string of the form DD or DD|MM or DD|MM|SS to decimal degrees
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/wikip.py#L179-L184
| null |
import g11pyutils as utils
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import XMLParser
import logging
import re
import base64
import hashlib
import codecs
LOG = logging.getLogger("wikip")
class WikipXMLParser(XMLParser):
def init(self, html=0, target=None, encoding=None):
super(WikipXMLParser, self).__init__(html, target, encoding)
def feed(self, data):
#LOG.warn("Feeding %s %s" % (type(%s), data))
# Yes this is awful, I've got to encode it...
#data = codecs.
r = super(WikipXMLParser, self).feed(data)
LOG.warn("Returned %s" % r)
return r
def Parse(self, data, num):
LOG.warn("Hello!!!!")
super(WikipXMLParser, self).Parse(data, num)
class WikipArticles(object):
"""Iterates over a Wikipedia XML article dump, producing one event per article.
The event is a deeply-nested dict matching the article XML and capturing the full article contents.
"""
def __init__(self, article_file=None, filter = None):
self.fo = utils.fopen(article_file, 'b') # 'utf-8')
self.filter = filter
LOG.info("Using ElementTree version %s", ET.VERSION)
def __iter__(self):
# get an iterable
context = ET.iterparse(self.fo, events=("start", "end"))#, parser=XMLParser(encoding="UTF-8"))
ET.register_namespace('', 'http://www.mediawiki.org/xml/export-0.8/')
# turn it into an iterator
context = iter(context)
# get the root element
event, root = next(context)
LOG.info("Root attrib: %s", root.attrib)
for event, el in context:
tag = bare(el.tag)
LOG.debug("Event: %s, El: %s, Tag: '%s'", event, el, tag)
if event == "end" and tag == "page":
d = utils.etree_to_dict(el)
if self.filter:
try:
d = self.filter(d)
if d:
yield d
except Exception as e:
LOG.warn("Exception filtering article: %s", e)
else:
yield d
root.clear() # clear each time to prevent memory growth
class WikipGeo(WikipArticles):
def __init__(self, article_file=None):
super(WikipGeo, self).__init__(article_file, geo_filter)
def wikip_url(s):
return 'http://wikipedia.org/wiki/'+s.replace(' ', '_')
def skip_article(title):
"""Skips articles that have no value"""
if title.find("Wikipedia:WikiProject National Register of Historic Places/") == 0:
return True
return False
def geo_filter(d):
"""Inspects the given Wikipedia article dict for geo-coordinates.
If no coordinates are found, returns None. Otherwise, returns a new dict
with the title and URL of the original article, along with coordinates."""
page = d["page"]
if not "revision" in page:
return None
title = page["title"]
if skip_article(title):
LOG.info("Skipping low-value article %s", title)
return None
text = page["revision"]["text"]
if not utils.is_str_type(text):
if "#text" in text:
text = text["#text"]
else:
return None
LOG.debug("--------------------------------------------------------------")
LOG.debug(title)
LOG.debug("--------------------------------------------------------------")
LOG.debug(text)
c = find_geo_coords(text)
u = wikip_url(title)
"""
m = hashlib.md5()
m.update(u.encode("UTF-8") if hasattr(u, 'encode') else u)
i = base64.urlsafe_b64encode(m.digest()).replace('=', '')
"""
return {
#"id": i,
"title": title,
"url": u,
"coords": c,
"updated": page["revision"].get("timestamp")
} if c else None
def bare(tag):
"""Returns a tag stripped of preceding namespace info"""
n = tag.rfind('}')
return tag[n+1:] if n >= 0 else tag
'''
| latitude = 48.8738
| longitude = 2.2950
'''
INFO_BOX_LAT_LON = re.compile(r"(\|\s*latitude\s*=\s*(-?[\d\.]+)\s*\|\s*longitude\s*=\s*(-?[\d\.]+))", re.MULTILINE )
'''
{{coord|35.0797|-80.7742|region:US-NC_type:edu|display=title}}
{{coord|77|51|S|166|40|E|}}
'''
COORDS_GEN = re.compile(r"(\{\{coord\|[^\}]+\}\})")
#COORDS_GROUPS = re.compile(r"\{\{coord\|(?:display[^\|]+\|)?((?:\s*-?[\d\.]+\s*\|?){1,3})([NS]\|)?((?:\s*-?[\d\.]+\s*\|){0,3})([EW])?")
COORDS_GROUPS = re.compile(r"\{\{coord\|(?:[^\d\|]+\|)*((?:\s*-?[\d\.]+\s*\|?){1,3})([NS]\|)?((?:\s*-?[\d\.]+\s*\|){0,3})([EW])?")
def find_geo_coords(s):
"""Returns a list of lat/lons found by scanning the given text"""
coords = []
LOG.debug("Matching in text size %s", len(s))
for c in INFO_BOX_LAT_LON.findall(s):
try:
coord = (float(c[1]), float(c[2])) #, c[0])
coords.append(coord)
LOG.debug("Found info box lat/lon: %s", coord)
except Exception as ex:
LOG.warn("Bad parse of info box %s: %s", c, ex)
for c in COORDS_GEN.findall(s):
# Special cases
if skip_coords(c):
LOG.debug("Ignorning coords %s", c)
continue
m = COORDS_GROUPS.search(c)
if not m:
LOG.warn("Unrecognized coord format: %s", c)
continue
try:
# Remove empty optional groups and remove pipes from matches
g = [(s[0:-1] if s[-1] == '|' else s) for s in list(m.groups()) if s is not None and len(s)]
#LOG.info("Found groups: %s", g)
if len(g) == 1: # Single lat|lon
lat, lon = g[0].split('|')
coord = (float(lat), float(lon)) #, c)
coords.append(coord)
LOG.debug("Found lat|lon: %s", coord)
elif g[3] == 'E' or g[3] == 'W':
lat = depipe(g[0]) * (1 if g[1].upper() == 'N' else -1)
lon = depipe(g[2]) * (1 if g[3].upper() == 'E' else -1)
coord = (lat, lon) #, c)
coords.append(coord)
LOG.debug("Found lat|NS|lon|EW: %s", coord)
else:
LOG.warn("Unrecognized coord format: %s (parsed %s)", c, g)
except Exception as ex:
LOG.warn("Bad parse of %s: %s", c, ex)
l = []
for c in set(coords): # Dedupe; the reality is non-trivial though...we want to keep only the most precise
if c[0] > 90 or c[0] < -90 or c[1] > 180 or c[1] < -180 or (c[0] == 0 and c[1] == 0):
LOG.warn("Invalid lat or lon: %s", c)
else:
l.append({"type": "Point", "coordinates": (c[1], c[0])}) # GeoJSON, lon goes first
return l
def skip_coords(c):
"""Skip coordinate strings that are not valid"""
if c == "{{coord|LAT|LONG|display=inline,title}}": # Unpopulated coord template
return True
if c.find("globe:") >= 0 and c.find("globe:earth") == -1: # Moon, venus, etc.
return True
return False
|
gear11/pypelogs
|
pypein/wikip.py
|
skip_coords
|
python
|
def skip_coords(c):
if c == "{{coord|LAT|LONG|display=inline,title}}": # Unpopulated coord template
return True
if c.find("globe:") >= 0 and c.find("globe:earth") == -1: # Moon, venus, etc.
return True
return False
|
Skip coordinate strings that are not valid
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/wikip.py#L186-L192
| null |
import g11pyutils as utils
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import XMLParser
import logging
import re
import base64
import hashlib
import codecs
LOG = logging.getLogger("wikip")
class WikipXMLParser(XMLParser):
def init(self, html=0, target=None, encoding=None):
super(WikipXMLParser, self).__init__(html, target, encoding)
def feed(self, data):
#LOG.warn("Feeding %s %s" % (type(%s), data))
# Yes this is awful, I've got to encode it...
#data = codecs.
r = super(WikipXMLParser, self).feed(data)
LOG.warn("Returned %s" % r)
return r
def Parse(self, data, num):
LOG.warn("Hello!!!!")
super(WikipXMLParser, self).Parse(data, num)
class WikipArticles(object):
"""Iterates over a Wikipedia XML article dump, producing one event per article.
The event is a deeply-nested dict matching the article XML and capturing the full article contents.
"""
def __init__(self, article_file=None, filter = None):
self.fo = utils.fopen(article_file, 'b') # 'utf-8')
self.filter = filter
LOG.info("Using ElementTree version %s", ET.VERSION)
def __iter__(self):
# get an iterable
context = ET.iterparse(self.fo, events=("start", "end"))#, parser=XMLParser(encoding="UTF-8"))
ET.register_namespace('', 'http://www.mediawiki.org/xml/export-0.8/')
# turn it into an iterator
context = iter(context)
# get the root element
event, root = next(context)
LOG.info("Root attrib: %s", root.attrib)
for event, el in context:
tag = bare(el.tag)
LOG.debug("Event: %s, El: %s, Tag: '%s'", event, el, tag)
if event == "end" and tag == "page":
d = utils.etree_to_dict(el)
if self.filter:
try:
d = self.filter(d)
if d:
yield d
except Exception as e:
LOG.warn("Exception filtering article: %s", e)
else:
yield d
root.clear() # clear each time to prevent memory growth
class WikipGeo(WikipArticles):
def __init__(self, article_file=None):
super(WikipGeo, self).__init__(article_file, geo_filter)
def wikip_url(s):
return 'http://wikipedia.org/wiki/'+s.replace(' ', '_')
def skip_article(title):
"""Skips articles that have no value"""
if title.find("Wikipedia:WikiProject National Register of Historic Places/") == 0:
return True
return False
def geo_filter(d):
"""Inspects the given Wikipedia article dict for geo-coordinates.
If no coordinates are found, returns None. Otherwise, returns a new dict
with the title and URL of the original article, along with coordinates."""
page = d["page"]
if not "revision" in page:
return None
title = page["title"]
if skip_article(title):
LOG.info("Skipping low-value article %s", title)
return None
text = page["revision"]["text"]
if not utils.is_str_type(text):
if "#text" in text:
text = text["#text"]
else:
return None
LOG.debug("--------------------------------------------------------------")
LOG.debug(title)
LOG.debug("--------------------------------------------------------------")
LOG.debug(text)
c = find_geo_coords(text)
u = wikip_url(title)
"""
m = hashlib.md5()
m.update(u.encode("UTF-8") if hasattr(u, 'encode') else u)
i = base64.urlsafe_b64encode(m.digest()).replace('=', '')
"""
return {
#"id": i,
"title": title,
"url": u,
"coords": c,
"updated": page["revision"].get("timestamp")
} if c else None
def bare(tag):
"""Returns a tag stripped of preceding namespace info"""
n = tag.rfind('}')
return tag[n+1:] if n >= 0 else tag
'''
| latitude = 48.8738
| longitude = 2.2950
'''
INFO_BOX_LAT_LON = re.compile(r"(\|\s*latitude\s*=\s*(-?[\d\.]+)\s*\|\s*longitude\s*=\s*(-?[\d\.]+))", re.MULTILINE )
'''
{{coord|35.0797|-80.7742|region:US-NC_type:edu|display=title}}
{{coord|77|51|S|166|40|E|}}
'''
COORDS_GEN = re.compile(r"(\{\{coord\|[^\}]+\}\})")
#COORDS_GROUPS = re.compile(r"\{\{coord\|(?:display[^\|]+\|)?((?:\s*-?[\d\.]+\s*\|?){1,3})([NS]\|)?((?:\s*-?[\d\.]+\s*\|){0,3})([EW])?")
COORDS_GROUPS = re.compile(r"\{\{coord\|(?:[^\d\|]+\|)*((?:\s*-?[\d\.]+\s*\|?){1,3})([NS]\|)?((?:\s*-?[\d\.]+\s*\|){0,3})([EW])?")
def find_geo_coords(s):
"""Returns a list of lat/lons found by scanning the given text"""
coords = []
LOG.debug("Matching in text size %s", len(s))
for c in INFO_BOX_LAT_LON.findall(s):
try:
coord = (float(c[1]), float(c[2])) #, c[0])
coords.append(coord)
LOG.debug("Found info box lat/lon: %s", coord)
except Exception as ex:
LOG.warn("Bad parse of info box %s: %s", c, ex)
for c in COORDS_GEN.findall(s):
# Special cases
if skip_coords(c):
LOG.debug("Ignorning coords %s", c)
continue
m = COORDS_GROUPS.search(c)
if not m:
LOG.warn("Unrecognized coord format: %s", c)
continue
try:
# Remove empty optional groups and remove pipes from matches
g = [(s[0:-1] if s[-1] == '|' else s) for s in list(m.groups()) if s is not None and len(s)]
#LOG.info("Found groups: %s", g)
if len(g) == 1: # Single lat|lon
lat, lon = g[0].split('|')
coord = (float(lat), float(lon)) #, c)
coords.append(coord)
LOG.debug("Found lat|lon: %s", coord)
elif g[3] == 'E' or g[3] == 'W':
lat = depipe(g[0]) * (1 if g[1].upper() == 'N' else -1)
lon = depipe(g[2]) * (1 if g[3].upper() == 'E' else -1)
coord = (lat, lon) #, c)
coords.append(coord)
LOG.debug("Found lat|NS|lon|EW: %s", coord)
else:
LOG.warn("Unrecognized coord format: %s (parsed %s)", c, g)
except Exception as ex:
LOG.warn("Bad parse of %s: %s", c, ex)
l = []
for c in set(coords): # Dedupe; the reality is non-trivial though...we want to keep only the most precise
if c[0] > 90 or c[0] < -90 or c[1] > 180 or c[1] < -180 or (c[0] == 0 and c[1] == 0):
LOG.warn("Invalid lat or lon: %s", c)
else:
l.append({"type": "Point", "coordinates": (c[1], c[0])}) # GeoJSON, lon goes first
return l
def depipe(s):
"""Convert a string of the form DD or DD|MM or DD|MM|SS to decimal degrees"""
n = 0
for i in reversed(s.split('|')):
n = n / 60.0 + float(i)
return n
|
gear11/pypelogs
|
pypein/flickr.py
|
Flickr.photo
|
python
|
def photo(self, args):
rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
p = rsp['photo']
yield self._prep(p)
|
Retrieves metadata for a specific photo.
flickr:(credsfile),photo,(photo_id)
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L58-L66
|
[
"def _prep(e):\n \"\"\"\n Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.\n \"\"\"\n if 'lastupdate' in e:\n e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))\n for k in ['farm', 'server', 'id', 'secret']:\n if not k in e:\n return e\n e[\"url\"] = \"https://farm%s.staticflickr.com/%s/%s_%s_b.jpg\" % (e[\"farm\"], e[\"server\"], e[\"id\"], e[\"secret\"])\n return e\n",
"def _load_rsp(rsp):\n \"\"\"\n Converts raw Flickr string response to Python dict\n \"\"\"\n first = rsp.find('(') + 1\n last = rsp.rfind(')')\n return json.loads(rsp[first:last])\n"
] |
class Flickr(object):
"""
Input from the Flickr API. Uses the flickrapi package from PIP ( http://stuvel.eu/flickrapi )
Usage:
flickr:credsfile,command,args
The credsfile is a JSON credentials file containing the Flickr API key and secret:
{
'api_key': '....',
'api_secret': '....'
}
Supported commands are:
interesting
photo,(id)
search,arg1=val1,arg2=val2...
"""
def __init__(self, spec):
# Defer import until we need it
try:
import flickrapi
tokens = spec.split(',')
creds_fname = tokens[0]
if len(tokens) > 1:
self.cmd = tokens[1]
else:
raise ValueError("Spec must inlude a command.")
self.args = tokens[2:] if len(tokens) > 2 else []
# Parse creds file
with open(creds_fname, "r") as fo:
creds = eval(fo.read())
LOG.info("Using creds: %s" % creds)
self.flickr = flickrapi.FlickrAPI(creds['api_key'], creds['api_secret'], format='json')
except ImportError as ex:
LOG.error("Use of this input requires installing the flickrapi module ('pip install flickrapi')")
raise ex
def __iter__(self):
try:
yielded = 0
rsp = getattr(self, self.cmd)(self.args)
for e in rsp:
yielded += 1
yield e
LOG.info("Method '%s' yielded %s rows" % (self.cmd, yielded))
except Exception as err:
LOG.exception(err)
def search(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs)
def interesting(self, args=None):
"""
Gets interesting photos.
flickr:(credsfile),interesting
"""
kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group')
def group(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'group_id': args[0]}
return self._paged_api_call(self.flickr.groups_pools_getPhotos, kwargs)
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
@staticmethod
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e
@staticmethod
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last])
|
gear11/pypelogs
|
pypein/flickr.py
|
Flickr.search
|
python
|
def search(self, args):
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs)
|
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L68-L78
|
[
"def _paged_api_call(self, func, kwargs, item_type='photo'):\n \"\"\"\n Takes a Flickr API function object and dict of keyword args and calls the\n API call repeatedly with an incrementing page value until all contents are exhausted.\n Flickr seems to limit to about 500 items.\n \"\"\"\n page = 1\n while True:\n LOG.info(\"Fetching page %s\" % page)\n kwargs['page'] = page\n rsp = self._load_rsp(func(**kwargs))\n if rsp[\"stat\"] == \"ok\":\n plural = item_type + 's'\n if plural in rsp:\n items = rsp[plural]\n if int(items[\"page\"]) < page:\n LOG.info(\"End of Flickr pages (%s pages with %s per page)\" % (items[\"pages\"], items[\"perpage\"]))\n break\n for i in items[item_type]:\n yield self._prep(i)\n else:\n yield rsp\n page += 1\n else:\n yield [rsp]\n break\n"
] |
class Flickr(object):
"""
Input from the Flickr API. Uses the flickrapi package from PIP ( http://stuvel.eu/flickrapi )
Usage:
flickr:credsfile,command,args
The credsfile is a JSON credentials file containing the Flickr API key and secret:
{
'api_key': '....',
'api_secret': '....'
}
Supported commands are:
interesting
photo,(id)
search,arg1=val1,arg2=val2...
"""
def __init__(self, spec):
# Defer import until we need it
try:
import flickrapi
tokens = spec.split(',')
creds_fname = tokens[0]
if len(tokens) > 1:
self.cmd = tokens[1]
else:
raise ValueError("Spec must inlude a command.")
self.args = tokens[2:] if len(tokens) > 2 else []
# Parse creds file
with open(creds_fname, "r") as fo:
creds = eval(fo.read())
LOG.info("Using creds: %s" % creds)
self.flickr = flickrapi.FlickrAPI(creds['api_key'], creds['api_secret'], format='json')
except ImportError as ex:
LOG.error("Use of this input requires installing the flickrapi module ('pip install flickrapi')")
raise ex
def __iter__(self):
try:
yielded = 0
rsp = getattr(self, self.cmd)(self.args)
for e in rsp:
yielded += 1
yield e
LOG.info("Method '%s' yielded %s rows" % (self.cmd, yielded))
except Exception as err:
LOG.exception(err)
def photo(self, args):
"""
Retrieves metadata for a specific photo.
flickr:(credsfile),photo,(photo_id)
"""
rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
p = rsp['photo']
yield self._prep(p)
def interesting(self, args=None):
"""
Gets interesting photos.
flickr:(credsfile),interesting
"""
kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group')
def group(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'group_id': args[0]}
return self._paged_api_call(self.flickr.groups_pools_getPhotos, kwargs)
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
@staticmethod
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e
@staticmethod
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last])
|
gear11/pypelogs
|
pypein/flickr.py
|
Flickr.interesting
|
python
|
def interesting(self, args=None):
kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
|
Gets interesting photos.
flickr:(credsfile),interesting
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L80-L88
|
[
"def _paged_api_call(self, func, kwargs, item_type='photo'):\n \"\"\"\n Takes a Flickr API function object and dict of keyword args and calls the\n API call repeatedly with an incrementing page value until all contents are exhausted.\n Flickr seems to limit to about 500 items.\n \"\"\"\n page = 1\n while True:\n LOG.info(\"Fetching page %s\" % page)\n kwargs['page'] = page\n rsp = self._load_rsp(func(**kwargs))\n if rsp[\"stat\"] == \"ok\":\n plural = item_type + 's'\n if plural in rsp:\n items = rsp[plural]\n if int(items[\"page\"]) < page:\n LOG.info(\"End of Flickr pages (%s pages with %s per page)\" % (items[\"pages\"], items[\"perpage\"]))\n break\n for i in items[item_type]:\n yield self._prep(i)\n else:\n yield rsp\n page += 1\n else:\n yield [rsp]\n break\n"
] |
class Flickr(object):
"""
Input from the Flickr API. Uses the flickrapi package from PIP ( http://stuvel.eu/flickrapi )
Usage:
flickr:credsfile,command,args
The credsfile is a JSON credentials file containing the Flickr API key and secret:
{
'api_key': '....',
'api_secret': '....'
}
Supported commands are:
interesting
photo,(id)
search,arg1=val1,arg2=val2...
"""
def __init__(self, spec):
# Defer import until we need it
try:
import flickrapi
tokens = spec.split(',')
creds_fname = tokens[0]
if len(tokens) > 1:
self.cmd = tokens[1]
else:
raise ValueError("Spec must inlude a command.")
self.args = tokens[2:] if len(tokens) > 2 else []
# Parse creds file
with open(creds_fname, "r") as fo:
creds = eval(fo.read())
LOG.info("Using creds: %s" % creds)
self.flickr = flickrapi.FlickrAPI(creds['api_key'], creds['api_secret'], format='json')
except ImportError as ex:
LOG.error("Use of this input requires installing the flickrapi module ('pip install flickrapi')")
raise ex
def __iter__(self):
try:
yielded = 0
rsp = getattr(self, self.cmd)(self.args)
for e in rsp:
yielded += 1
yield e
LOG.info("Method '%s' yielded %s rows" % (self.cmd, yielded))
except Exception as err:
LOG.exception(err)
def photo(self, args):
"""
Retrieves metadata for a specific photo.
flickr:(credsfile),photo,(photo_id)
"""
rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
p = rsp['photo']
yield self._prep(p)
def search(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs)
def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group')
def group(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'group_id': args[0]}
return self._paged_api_call(self.flickr.groups_pools_getPhotos, kwargs)
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
@staticmethod
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e
@staticmethod
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last])
|
gear11/pypelogs
|
pypein/flickr.py
|
Flickr.search_groups
|
python
|
def search_groups(self, args):
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group')
|
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L90-L97
|
[
"def _paged_api_call(self, func, kwargs, item_type='photo'):\n \"\"\"\n Takes a Flickr API function object and dict of keyword args and calls the\n API call repeatedly with an incrementing page value until all contents are exhausted.\n Flickr seems to limit to about 500 items.\n \"\"\"\n page = 1\n while True:\n LOG.info(\"Fetching page %s\" % page)\n kwargs['page'] = page\n rsp = self._load_rsp(func(**kwargs))\n if rsp[\"stat\"] == \"ok\":\n plural = item_type + 's'\n if plural in rsp:\n items = rsp[plural]\n if int(items[\"page\"]) < page:\n LOG.info(\"End of Flickr pages (%s pages with %s per page)\" % (items[\"pages\"], items[\"perpage\"]))\n break\n for i in items[item_type]:\n yield self._prep(i)\n else:\n yield rsp\n page += 1\n else:\n yield [rsp]\n break\n"
] |
class Flickr(object):
"""
Input from the Flickr API. Uses the flickrapi package from PIP ( http://stuvel.eu/flickrapi )
Usage:
flickr:credsfile,command,args
The credsfile is a JSON credentials file containing the Flickr API key and secret:
{
'api_key': '....',
'api_secret': '....'
}
Supported commands are:
interesting
photo,(id)
search,arg1=val1,arg2=val2...
"""
def __init__(self, spec):
# Defer import until we need it
try:
import flickrapi
tokens = spec.split(',')
creds_fname = tokens[0]
if len(tokens) > 1:
self.cmd = tokens[1]
else:
raise ValueError("Spec must inlude a command.")
self.args = tokens[2:] if len(tokens) > 2 else []
# Parse creds file
with open(creds_fname, "r") as fo:
creds = eval(fo.read())
LOG.info("Using creds: %s" % creds)
self.flickr = flickrapi.FlickrAPI(creds['api_key'], creds['api_secret'], format='json')
except ImportError as ex:
LOG.error("Use of this input requires installing the flickrapi module ('pip install flickrapi')")
raise ex
def __iter__(self):
try:
yielded = 0
rsp = getattr(self, self.cmd)(self.args)
for e in rsp:
yielded += 1
yield e
LOG.info("Method '%s' yielded %s rows" % (self.cmd, yielded))
except Exception as err:
LOG.exception(err)
def photo(self, args):
"""
Retrieves metadata for a specific photo.
flickr:(credsfile),photo,(photo_id)
"""
rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
p = rsp['photo']
yield self._prep(p)
def search(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs)
def interesting(self, args=None):
"""
Gets interesting photos.
flickr:(credsfile),interesting
"""
kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
def group(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'group_id': args[0]}
return self._paged_api_call(self.flickr.groups_pools_getPhotos, kwargs)
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
@staticmethod
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e
@staticmethod
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last])
|
gear11/pypelogs
|
pypein/flickr.py
|
Flickr.group
|
python
|
def group(self, args):
kwargs = {'group_id': args[0]}
return self._paged_api_call(self.flickr.groups_pools_getPhotos, kwargs)
|
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L99-L106
|
[
"def _paged_api_call(self, func, kwargs, item_type='photo'):\n \"\"\"\n Takes a Flickr API function object and dict of keyword args and calls the\n API call repeatedly with an incrementing page value until all contents are exhausted.\n Flickr seems to limit to about 500 items.\n \"\"\"\n page = 1\n while True:\n LOG.info(\"Fetching page %s\" % page)\n kwargs['page'] = page\n rsp = self._load_rsp(func(**kwargs))\n if rsp[\"stat\"] == \"ok\":\n plural = item_type + 's'\n if plural in rsp:\n items = rsp[plural]\n if int(items[\"page\"]) < page:\n LOG.info(\"End of Flickr pages (%s pages with %s per page)\" % (items[\"pages\"], items[\"perpage\"]))\n break\n for i in items[item_type]:\n yield self._prep(i)\n else:\n yield rsp\n page += 1\n else:\n yield [rsp]\n break\n"
] |
class Flickr(object):
"""
Input from the Flickr API. Uses the flickrapi package from PIP ( http://stuvel.eu/flickrapi )
Usage:
flickr:credsfile,command,args
The credsfile is a JSON credentials file containing the Flickr API key and secret:
{
'api_key': '....',
'api_secret': '....'
}
Supported commands are:
interesting
photo,(id)
search,arg1=val1,arg2=val2...
"""
def __init__(self, spec):
# Defer import until we need it
try:
import flickrapi
tokens = spec.split(',')
creds_fname = tokens[0]
if len(tokens) > 1:
self.cmd = tokens[1]
else:
raise ValueError("Spec must inlude a command.")
self.args = tokens[2:] if len(tokens) > 2 else []
# Parse creds file
with open(creds_fname, "r") as fo:
creds = eval(fo.read())
LOG.info("Using creds: %s" % creds)
self.flickr = flickrapi.FlickrAPI(creds['api_key'], creds['api_secret'], format='json')
except ImportError as ex:
LOG.error("Use of this input requires installing the flickrapi module ('pip install flickrapi')")
raise ex
def __iter__(self):
try:
yielded = 0
rsp = getattr(self, self.cmd)(self.args)
for e in rsp:
yielded += 1
yield e
LOG.info("Method '%s' yielded %s rows" % (self.cmd, yielded))
except Exception as err:
LOG.exception(err)
def photo(self, args):
"""
Retrieves metadata for a specific photo.
flickr:(credsfile),photo,(photo_id)
"""
rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
p = rsp['photo']
yield self._prep(p)
def search(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs)
def interesting(self, args=None):
"""
Gets interesting photos.
flickr:(credsfile),interesting
"""
kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group')
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
@staticmethod
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e
@staticmethod
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last])
|
gear11/pypelogs
|
pypein/flickr.py
|
Flickr._paged_api_call
|
python
|
def _paged_api_call(self, func, kwargs, item_type='photo'):
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
|
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L108-L133
|
[
"def _prep(e):\n \"\"\"\n Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.\n \"\"\"\n if 'lastupdate' in e:\n e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))\n for k in ['farm', 'server', 'id', 'secret']:\n if not k in e:\n return e\n e[\"url\"] = \"https://farm%s.staticflickr.com/%s/%s_%s_b.jpg\" % (e[\"farm\"], e[\"server\"], e[\"id\"], e[\"secret\"])\n return e\n",
"def _load_rsp(rsp):\n \"\"\"\n Converts raw Flickr string response to Python dict\n \"\"\"\n first = rsp.find('(') + 1\n last = rsp.rfind(')')\n return json.loads(rsp[first:last])\n"
] |
class Flickr(object):
"""
Input from the Flickr API. Uses the flickrapi package from PIP ( http://stuvel.eu/flickrapi )
Usage:
flickr:credsfile,command,args
The credsfile is a JSON credentials file containing the Flickr API key and secret:
{
'api_key': '....',
'api_secret': '....'
}
Supported commands are:
interesting
photo,(id)
search,arg1=val1,arg2=val2...
"""
def __init__(self, spec):
# Defer import until we need it
try:
import flickrapi
tokens = spec.split(',')
creds_fname = tokens[0]
if len(tokens) > 1:
self.cmd = tokens[1]
else:
raise ValueError("Spec must inlude a command.")
self.args = tokens[2:] if len(tokens) > 2 else []
# Parse creds file
with open(creds_fname, "r") as fo:
creds = eval(fo.read())
LOG.info("Using creds: %s" % creds)
self.flickr = flickrapi.FlickrAPI(creds['api_key'], creds['api_secret'], format='json')
except ImportError as ex:
LOG.error("Use of this input requires installing the flickrapi module ('pip install flickrapi')")
raise ex
def __iter__(self):
try:
yielded = 0
rsp = getattr(self, self.cmd)(self.args)
for e in rsp:
yielded += 1
yield e
LOG.info("Method '%s' yielded %s rows" % (self.cmd, yielded))
except Exception as err:
LOG.exception(err)
def photo(self, args):
"""
Retrieves metadata for a specific photo.
flickr:(credsfile),photo,(photo_id)
"""
rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
p = rsp['photo']
yield self._prep(p)
def search(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs)
def interesting(self, args=None):
"""
Gets interesting photos.
flickr:(credsfile),interesting
"""
kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group')
def group(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'group_id': args[0]}
return self._paged_api_call(self.flickr.groups_pools_getPhotos, kwargs)
@staticmethod
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e
@staticmethod
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last])
|
gear11/pypelogs
|
pypein/flickr.py
|
Flickr._prep
|
python
|
def _prep(e):
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e
|
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L136-L146
| null |
class Flickr(object):
"""
Input from the Flickr API. Uses the flickrapi package from PIP ( http://stuvel.eu/flickrapi )
Usage:
flickr:credsfile,command,args
The credsfile is a JSON credentials file containing the Flickr API key and secret:
{
'api_key': '....',
'api_secret': '....'
}
Supported commands are:
interesting
photo,(id)
search,arg1=val1,arg2=val2...
"""
def __init__(self, spec):
# Defer import until we need it
try:
import flickrapi
tokens = spec.split(',')
creds_fname = tokens[0]
if len(tokens) > 1:
self.cmd = tokens[1]
else:
raise ValueError("Spec must inlude a command.")
self.args = tokens[2:] if len(tokens) > 2 else []
# Parse creds file
with open(creds_fname, "r") as fo:
creds = eval(fo.read())
LOG.info("Using creds: %s" % creds)
self.flickr = flickrapi.FlickrAPI(creds['api_key'], creds['api_secret'], format='json')
except ImportError as ex:
LOG.error("Use of this input requires installing the flickrapi module ('pip install flickrapi')")
raise ex
def __iter__(self):
try:
yielded = 0
rsp = getattr(self, self.cmd)(self.args)
for e in rsp:
yielded += 1
yield e
LOG.info("Method '%s' yielded %s rows" % (self.cmd, yielded))
except Exception as err:
LOG.exception(err)
def photo(self, args):
"""
Retrieves metadata for a specific photo.
flickr:(credsfile),photo,(photo_id)
"""
rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
p = rsp['photo']
yield self._prep(p)
def search(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs)
def interesting(self, args=None):
"""
Gets interesting photos.
flickr:(credsfile),interesting
"""
kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group')
def group(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'group_id': args[0]}
return self._paged_api_call(self.flickr.groups_pools_getPhotos, kwargs)
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
@staticmethod
@staticmethod
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last])
|
gear11/pypelogs
|
pypein/flickr.py
|
Flickr._load_rsp
|
python
|
def _load_rsp(rsp):
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last])
|
Converts raw Flickr string response to Python dict
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/flickr.py#L149-L155
| null |
class Flickr(object):
"""
Input from the Flickr API. Uses the flickrapi package from PIP ( http://stuvel.eu/flickrapi )
Usage:
flickr:credsfile,command,args
The credsfile is a JSON credentials file containing the Flickr API key and secret:
{
'api_key': '....',
'api_secret': '....'
}
Supported commands are:
interesting
photo,(id)
search,arg1=val1,arg2=val2...
"""
def __init__(self, spec):
# Defer import until we need it
try:
import flickrapi
tokens = spec.split(',')
creds_fname = tokens[0]
if len(tokens) > 1:
self.cmd = tokens[1]
else:
raise ValueError("Spec must inlude a command.")
self.args = tokens[2:] if len(tokens) > 2 else []
# Parse creds file
with open(creds_fname, "r") as fo:
creds = eval(fo.read())
LOG.info("Using creds: %s" % creds)
self.flickr = flickrapi.FlickrAPI(creds['api_key'], creds['api_secret'], format='json')
except ImportError as ex:
LOG.error("Use of this input requires installing the flickrapi module ('pip install flickrapi')")
raise ex
def __iter__(self):
try:
yielded = 0
rsp = getattr(self, self.cmd)(self.args)
for e in rsp:
yielded += 1
yield e
LOG.info("Method '%s' yielded %s rows" % (self.cmd, yielded))
except Exception as err:
LOG.exception(err)
def photo(self, args):
"""
Retrieves metadata for a specific photo.
flickr:(credsfile),photo,(photo_id)
"""
rsp = self._load_rsp(self.flickr.photos_getInfo(photo_id=args[0]))
p = rsp['photo']
yield self._prep(p)
def search(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {}
for a in args:
k, v = a.split('=')
kwargs[k] = v
return self._paged_api_call(self.flickr.photos_search, kwargs)
def interesting(self, args=None):
"""
Gets interesting photos.
flickr:(credsfile),interesting
"""
kwargs = {'extras': ','.join(args) if args else 'last_update,geo,owner_name,url_sq'}
return self._paged_api_call(self.flickr.interestingness_getList, kwargs)
def search_groups(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'text': args[0]}
return self._paged_api_call(self.flickr.groups_search, kwargs, 'group')
def group(self, args):
"""
Executes a search
flickr:(credsfile),search,(arg1)=(val1),(arg2)=(val2)...
"""
kwargs = {'group_id': args[0]}
return self._paged_api_call(self.flickr.groups_pools_getPhotos, kwargs)
def _paged_api_call(self, func, kwargs, item_type='photo'):
"""
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
"""
page = 1
while True:
LOG.info("Fetching page %s" % page)
kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
@staticmethod
def _prep(e):
"""
Normalizes lastupdate to a timestamp, and constructs a URL from the embedded attributes.
"""
if 'lastupdate' in e:
e['lastupdate'] = datetime.datetime.fromtimestamp(int(e['lastupdate']))
for k in ['farm', 'server', 'id', 'secret']:
if not k in e:
return e
e["url"] = "https://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (e["farm"], e["server"], e["id"], e["secret"])
return e
@staticmethod
|
gear11/pypelogs
|
pypein/nginx.py
|
Nginx.to_ts
|
python
|
def to_ts(s):
# Strip TZ portion if present
m = Nginx.DATE_FMT.match(s)
if m:
s = m.group(1)
delta = timedelta(seconds=int(m.group(3)) * (-1 if m.group(2) == '-' else 1)) # Offset from GMT
else:
delta = timedelta(seconds=0)
dt = datetime.strptime(s, "%d/%b/%Y:%H:%M:%S")
dt += delta
return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
|
Parses an NGINX timestamp from "30/Apr/2014:07:32:09 +0000" and returns it as ISO 8601"
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypein/nginx.py#L65-L77
| null |
class Nginx(object):
"""
Parses an NGINX log with the following format:
10.208.128.193 - - [30/Apr/2014:07:32:09 +0000] "GET / HTTP/1.1" 200 2089 "-" "Mozilla/5.0 (Lin...37.36" 0.028 "149.254.219.174, 66.249.93.213, 10.183.252.20, ::ffff:127.0.0.1,::ffff:127.0.0.1"
And assigns to event properties as follows:
{client_ip} - {remote_user} [{timestamp}] "GET {uri} HTTP/{http_ver}" {status} {length} "{referer}" "{user_agent}" {duration} "{forwarded_for}"
Properties which would be set to '-' (esp. remote_user and referer) are simply omitted
Accepts the option "deproxy=true" which will
"""
LOG_FMT = re.compile(r'([^\s]+) - ([^\s]+) \[([^\]]+)\]\s+"(\w+) ([^\s]+) HTTP/([^"]+)" (\d+) (\d+) "([^"]*)" "([^"]*)" ([^\s]+) "([^"]+)"')
def __init__(self, spec=None):
if spec:
args = spec.split(',', 1)
f = args[0]
self.opts = utils.to_dict(args[1]) if len(args) is 2 else {}
else:
f = None
self.opts = {}
self.fo = utils.fopen(f)
def __iter__(self):
for line in self.fo:
LOG.debug("Raw line: %s" % line)
m = Nginx.LOG_FMT.match(line)
if not m:
LOG.warn("Input line did not match regex: %s" % line)
else:
g = m.groups()
# Always set these
e = {"method": g[3], "uri": g[4], "http_ver": g[5], "status": int(g[6]), "length": int(g[7]), "duration":float(g[10])}
# Set these only if not '-'
if g[2] and g[2] != '-':
e['remote_user'] = g[2]
if g[8] and g[8] != '-':
e['referer'] = g[8]
if g[9] and g[9] != '-':
e['user_agent'] = g[9]
# Parse timestamp
e['timestamp'] = Nginx.to_ts(g[2])
# For deproxy, client is actually left-most forwarded IP
if self.opts.get('deproxy') == "true" and g[11]:
ips = [s.strip() for s in g[11].split(",")]
e['rev_proxy_ip'] = g[0]
e['client_ip'] = ips[0]
if len(ips) > 1:
e['forwarded_for'] = ",".join(ips[1:])
else:
e['client_ip'] = g[0]
e['forwarded_for'] = g[11]
yield e
DATE_FMT = re.compile(r'([^\s]+) ([\+\-])(\d+)')
@staticmethod
|
gear11/pypelogs
|
pypelogs.py
|
process
|
python
|
def process(specs):
pout, pin = chain_specs(specs)
LOG.info("Processing")
sw = StopWatch().start()
r = pout.process(pin)
if r:
print(r)
LOG.info("Finished in %s", sw.read())
|
Executes the passed in list of specs
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypelogs.py#L33-L43
|
[
"def chain_specs(specs):\n \"\"\"\n Parses the incoming list of specs and produces a tuple (pout, pin) that can be invoked with:\n <pre>\n result = pout.process(pin)\n </pre>\n \"\"\"\n LOG.info(\"Parsing %s specs\", len(specs))\n # First spec is always an input\n pin = pypein.input_for(specs[0]).__iter__()\n # If only an input spec was provided, then use json for output\n if len(specs) == 1:\n pout = pypeout.output_for(\"json\")\n else:\n # Middle specs are filters that successively wrap input\n for s in specs[1:-1]:\n pin = pypef.filter_for(s).filter(pin)\n # Assume output on last spec, but it may be a filter.\n # If last spec is a filter, use json for output\n try:\n pout = pypeout.output_for(specs[-1])\n except pypeout.NoSuchOutputException:\n pin = pypef.filter_for(specs[-1]).filter(pin)\n pout = pypeout.output_for(\"json\")\n return pout, pin\n"
] |
import logging
import argparse
import sys
import pypein
import pypef
import pypeout
from g11pyutils import StopWatch
LOG = logging.getLogger("pypelogs")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('specs', metavar='S', nargs='*', help='A pype specification')
parser.add_argument("-d", "--debug", help="Log at debug level", action='store_true')
parser.add_argument("-i", "--info", help="Log at info level", action='store_true')
parser.add_argument("-x", "--execute", help="A config file to execute before running.")
args = parser.parse_args()
level = logging.DEBUG if args.debug else logging.INFO if args.info else logging.WARNING
logging.basicConfig(format='%(asctime)-15s %(levelname)s:%(name)s:%(message)s', level=level, stream=sys.stderr)
if args.execute:
LOG.info("Running config file %s" % args.execute)
exec(compile(open(args.execute, "rb").read(), args.execute, 'exec'), globals())
if args.specs:
process(args.specs)
elif not args.execute:
LOG.warn("No specs provided and no file executed")
def chain_specs(specs):
"""
Parses the incoming list of specs and produces a tuple (pout, pin) that can be invoked with:
<pre>
result = pout.process(pin)
</pre>
"""
LOG.info("Parsing %s specs", len(specs))
# First spec is always an input
pin = pypein.input_for(specs[0]).__iter__()
# If only an input spec was provided, then use json for output
if len(specs) == 1:
pout = pypeout.output_for("json")
else:
# Middle specs are filters that successively wrap input
for s in specs[1:-1]:
pin = pypef.filter_for(s).filter(pin)
# Assume output on last spec, but it may be a filter.
# If last spec is a filter, use json for output
try:
pout = pypeout.output_for(specs[-1])
except pypeout.NoSuchOutputException:
pin = pypef.filter_for(specs[-1]).filter(pin)
pout = pypeout.output_for("json")
return pout, pin
def register_input(s, clz):
pypein.register(s, clz)
def register_filter(s, clz):
pypef.register(s, clz)
def register_output(s, clz):
pypeout.register(s, clz)
if __name__ == '__main__':
main()
|
gear11/pypelogs
|
pypelogs.py
|
chain_specs
|
python
|
def chain_specs(specs):
LOG.info("Parsing %s specs", len(specs))
# First spec is always an input
pin = pypein.input_for(specs[0]).__iter__()
# If only an input spec was provided, then use json for output
if len(specs) == 1:
pout = pypeout.output_for("json")
else:
# Middle specs are filters that successively wrap input
for s in specs[1:-1]:
pin = pypef.filter_for(s).filter(pin)
# Assume output on last spec, but it may be a filter.
# If last spec is a filter, use json for output
try:
pout = pypeout.output_for(specs[-1])
except pypeout.NoSuchOutputException:
pin = pypef.filter_for(specs[-1]).filter(pin)
pout = pypeout.output_for("json")
return pout, pin
|
Parses the incoming list of specs and produces a tuple (pout, pin) that can be invoked with:
<pre>
result = pout.process(pin)
</pre>
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypelogs.py#L46-L70
|
[
"def input_for(s):\n spec_args = s.split(':', 1)\n clz = CLASSES.get(spec_args[0])\n if not clz:\n raise ValueError(\"No such input type: %s\", spec_args[0])\n return clz() if len(spec_args) == 1 else clz(spec_args[1])\n",
"def filter_for(s):\n spec_args = s.split(':', 1)\n clz = CLASSES.get(spec_args[0])\n if not clz:\n raise ValueError(\"No such filter type: %s\", spec_args[0])\n return clz() if len(spec_args) == 1 else clz(spec_args[1])\n",
"def output_for(s):\n spec_args = s.split(':', 1)\n clz = CLASSES.get(spec_args[0])\n if not clz:\n raise NoSuchOutputException(spec_args[0])\n return clz() if len(spec_args) == 1 else clz(spec_args[1])\n"
] |
import logging
import argparse
import sys
import pypein
import pypef
import pypeout
from g11pyutils import StopWatch
LOG = logging.getLogger("pypelogs")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('specs', metavar='S', nargs='*', help='A pype specification')
parser.add_argument("-d", "--debug", help="Log at debug level", action='store_true')
parser.add_argument("-i", "--info", help="Log at info level", action='store_true')
parser.add_argument("-x", "--execute", help="A config file to execute before running.")
args = parser.parse_args()
level = logging.DEBUG if args.debug else logging.INFO if args.info else logging.WARNING
logging.basicConfig(format='%(asctime)-15s %(levelname)s:%(name)s:%(message)s', level=level, stream=sys.stderr)
if args.execute:
LOG.info("Running config file %s" % args.execute)
exec(compile(open(args.execute, "rb").read(), args.execute, 'exec'), globals())
if args.specs:
process(args.specs)
elif not args.execute:
LOG.warn("No specs provided and no file executed")
def process(specs):
"""
Executes the passed in list of specs
"""
pout, pin = chain_specs(specs)
LOG.info("Processing")
sw = StopWatch().start()
r = pout.process(pin)
if r:
print(r)
LOG.info("Finished in %s", sw.read())
def register_input(s, clz):
pypein.register(s, clz)
def register_filter(s, clz):
pypef.register(s, clz)
def register_output(s, clz):
pypeout.register(s, clz)
if __name__ == '__main__':
main()
|
gear11/pypelogs
|
pypeout/mysql_out.py
|
MySQLOut.upsert
|
python
|
def upsert(self, events):
existing = self.get_existing_keys(events)
inserts = [e for e in events if not e[self.key] in existing]
updates = [e for e in events if e[self.key] in existing]
self.insert(inserts)
self.update(updates)
|
Inserts/updates the given events into MySQL
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypeout/mysql_out.py#L35-L41
|
[
"def get_existing_keys(self, events):\n \"\"\"Returns the list of keys from the given event source that are already in the DB\"\"\"\n data = [e[self.key] for e in events]\n ss = ','.join(['%s' for _ in data])\n query = 'SELECT %s FROM %s WHERE %s IN (%s)' % (self.key, self.table, self.key, ss)\n cursor = self.conn.conn.cursor()\n cursor.execute(query, data)\n LOG.info(\"%s (data: %s)\", query, data)\n existing = [r[0] for r in cursor.fetchall()]\n LOG.info(\"Existing IDs: %s\" % existing)\n return set(existing)\n",
"def insert(self, events):\n \"\"\"Constructs and executes a MySQL insert for the given events.\"\"\"\n if not len(events):\n return\n keys = sorted(events[0].keys())\n ss = ','.join(['%s' for _ in keys])\n query = 'INSERT INTO %s (%s) VALUES ' % (self.table, ','.join(keys))\n data = []\n for event in events:\n query += '(%s),' % ss\n data += [event[k] for k in keys]\n query = query[:-1] + ';'\n LOG.info(\"%s (data: %s)\", query, data)\n conn = self.conn.conn\n cursor = conn.cursor()\n cursor.execute(query, data)\n conn.commit()\n",
"def update(self, events):\n if not len(events):\n return\n # Get all non-key properties (by sampling 1st event)\n props = [p for p in sorted(events[0].keys()) if p != self.key]\n conn = self.conn.conn\n for event in events:\n query = 'UPDATE %s SET' % self.table\n for prop in props:\n query += ' %s=%%(%s)s,' % (prop, prop)\n query = query[:-1]\n query += ' WHERE %s = %%(%s)s;' % (self.key, self.key)\n LOG.info(\"%s (data: %s)\", query, event)\n cursor = conn.cursor()\n cursor.execute(query, event)\n cursor.close()\n # Make sure data is committed to the database\n conn.commit()\n"
] |
class MySQLOut(MySQLIn):
"""
Supports upserting of data into MySQL. Unlike the SQL output, this does a non-standard 'upsert' operation
that can update rows. Example usage:
mysql:username:password@host:port/db,upsert,table,key
The incoming events are expected to be compatible with the target table, and key indicates the field that
will be used to determine whether the action is an insert or update.
"""
def __init__(self, spec=""):
super(MySQLOut, self).__init__(spec)
cmd, self.table, self.key = self.query.split(',')
if cmd != 'upsert':
raise Exception("Only the 'upsert' command is currently supported")
def process(self, events):
bucket = []
for event in events:
bucket.append(event)
if len(bucket) >= BUCKET_SIZE:
self.upsert(bucket)
bucket = []
if len(bucket):
self.upsert(bucket)
def get_existing_keys(self, events):
"""Returns the list of keys from the given event source that are already in the DB"""
data = [e[self.key] for e in events]
ss = ','.join(['%s' for _ in data])
query = 'SELECT %s FROM %s WHERE %s IN (%s)' % (self.key, self.table, self.key, ss)
cursor = self.conn.conn.cursor()
cursor.execute(query, data)
LOG.info("%s (data: %s)", query, data)
existing = [r[0] for r in cursor.fetchall()]
LOG.info("Existing IDs: %s" % existing)
return set(existing)
def insert(self, events):
"""Constructs and executes a MySQL insert for the given events."""
if not len(events):
return
keys = sorted(events[0].keys())
ss = ','.join(['%s' for _ in keys])
query = 'INSERT INTO %s (%s) VALUES ' % (self.table, ','.join(keys))
data = []
for event in events:
query += '(%s),' % ss
data += [event[k] for k in keys]
query = query[:-1] + ';'
LOG.info("%s (data: %s)", query, data)
conn = self.conn.conn
cursor = conn.cursor()
cursor.execute(query, data)
conn.commit()
def update(self, events):
if not len(events):
return
# Get all non-key properties (by sampling 1st event)
props = [p for p in sorted(events[0].keys()) if p != self.key]
conn = self.conn.conn
for event in events:
query = 'UPDATE %s SET' % self.table
for prop in props:
query += ' %s=%%(%s)s,' % (prop, prop)
query = query[:-1]
query += ' WHERE %s = %%(%s)s;' % (self.key, self.key)
LOG.info("%s (data: %s)", query, event)
cursor = conn.cursor()
cursor.execute(query, event)
cursor.close()
# Make sure data is committed to the database
conn.commit()
|
gear11/pypelogs
|
pypeout/mysql_out.py
|
MySQLOut.get_existing_keys
|
python
|
def get_existing_keys(self, events):
data = [e[self.key] for e in events]
ss = ','.join(['%s' for _ in data])
query = 'SELECT %s FROM %s WHERE %s IN (%s)' % (self.key, self.table, self.key, ss)
cursor = self.conn.conn.cursor()
cursor.execute(query, data)
LOG.info("%s (data: %s)", query, data)
existing = [r[0] for r in cursor.fetchall()]
LOG.info("Existing IDs: %s" % existing)
return set(existing)
|
Returns the list of keys from the given event source that are already in the DB
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypeout/mysql_out.py#L43-L53
| null |
class MySQLOut(MySQLIn):
"""
Supports upserting of data into MySQL. Unlike the SQL output, this does a non-standard 'upsert' operation
that can update rows. Example usage:
mysql:username:password@host:port/db,upsert,table,key
The incoming events are expected to be compatible with the target table, and key indicates the field that
will be used to determine whether the action is an insert or update.
"""
def __init__(self, spec=""):
super(MySQLOut, self).__init__(spec)
cmd, self.table, self.key = self.query.split(',')
if cmd != 'upsert':
raise Exception("Only the 'upsert' command is currently supported")
def process(self, events):
bucket = []
for event in events:
bucket.append(event)
if len(bucket) >= BUCKET_SIZE:
self.upsert(bucket)
bucket = []
if len(bucket):
self.upsert(bucket)
def upsert(self, events):
"""Inserts/updates the given events into MySQL"""
existing = self.get_existing_keys(events)
inserts = [e for e in events if not e[self.key] in existing]
updates = [e for e in events if e[self.key] in existing]
self.insert(inserts)
self.update(updates)
def insert(self, events):
"""Constructs and executes a MySQL insert for the given events."""
if not len(events):
return
keys = sorted(events[0].keys())
ss = ','.join(['%s' for _ in keys])
query = 'INSERT INTO %s (%s) VALUES ' % (self.table, ','.join(keys))
data = []
for event in events:
query += '(%s),' % ss
data += [event[k] for k in keys]
query = query[:-1] + ';'
LOG.info("%s (data: %s)", query, data)
conn = self.conn.conn
cursor = conn.cursor()
cursor.execute(query, data)
conn.commit()
def update(self, events):
if not len(events):
return
# Get all non-key properties (by sampling 1st event)
props = [p for p in sorted(events[0].keys()) if p != self.key]
conn = self.conn.conn
for event in events:
query = 'UPDATE %s SET' % self.table
for prop in props:
query += ' %s=%%(%s)s,' % (prop, prop)
query = query[:-1]
query += ' WHERE %s = %%(%s)s;' % (self.key, self.key)
LOG.info("%s (data: %s)", query, event)
cursor = conn.cursor()
cursor.execute(query, event)
cursor.close()
# Make sure data is committed to the database
conn.commit()
|
gear11/pypelogs
|
pypeout/mysql_out.py
|
MySQLOut.insert
|
python
|
def insert(self, events):
if not len(events):
return
keys = sorted(events[0].keys())
ss = ','.join(['%s' for _ in keys])
query = 'INSERT INTO %s (%s) VALUES ' % (self.table, ','.join(keys))
data = []
for event in events:
query += '(%s),' % ss
data += [event[k] for k in keys]
query = query[:-1] + ';'
LOG.info("%s (data: %s)", query, data)
conn = self.conn.conn
cursor = conn.cursor()
cursor.execute(query, data)
conn.commit()
|
Constructs and executes a MySQL insert for the given events.
|
train
|
https://github.com/gear11/pypelogs/blob/da5dc0fee5373a4be294798b5e32cd0a803d8bbe/pypeout/mysql_out.py#L55-L71
| null |
class MySQLOut(MySQLIn):
"""
Supports upserting of data into MySQL. Unlike the SQL output, this does a non-standard 'upsert' operation
that can update rows. Example usage:
mysql:username:password@host:port/db,upsert,table,key
The incoming events are expected to be compatible with the target table, and key indicates the field that
will be used to determine whether the action is an insert or update.
"""
def __init__(self, spec=""):
super(MySQLOut, self).__init__(spec)
cmd, self.table, self.key = self.query.split(',')
if cmd != 'upsert':
raise Exception("Only the 'upsert' command is currently supported")
def process(self, events):
bucket = []
for event in events:
bucket.append(event)
if len(bucket) >= BUCKET_SIZE:
self.upsert(bucket)
bucket = []
if len(bucket):
self.upsert(bucket)
def upsert(self, events):
"""Inserts/updates the given events into MySQL"""
existing = self.get_existing_keys(events)
inserts = [e for e in events if not e[self.key] in existing]
updates = [e for e in events if e[self.key] in existing]
self.insert(inserts)
self.update(updates)
def get_existing_keys(self, events):
"""Returns the list of keys from the given event source that are already in the DB"""
data = [e[self.key] for e in events]
ss = ','.join(['%s' for _ in data])
query = 'SELECT %s FROM %s WHERE %s IN (%s)' % (self.key, self.table, self.key, ss)
cursor = self.conn.conn.cursor()
cursor.execute(query, data)
LOG.info("%s (data: %s)", query, data)
existing = [r[0] for r in cursor.fetchall()]
LOG.info("Existing IDs: %s" % existing)
return set(existing)
def update(self, events):
if not len(events):
return
# Get all non-key properties (by sampling 1st event)
props = [p for p in sorted(events[0].keys()) if p != self.key]
conn = self.conn.conn
for event in events:
query = 'UPDATE %s SET' % self.table
for prop in props:
query += ' %s=%%(%s)s,' % (prop, prop)
query = query[:-1]
query += ' WHERE %s = %%(%s)s;' % (self.key, self.key)
LOG.info("%s (data: %s)", query, event)
cursor = conn.cursor()
cursor.execute(query, event)
cursor.close()
# Make sure data is committed to the database
conn.commit()
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/driver.py
|
dumplist
|
python
|
def dumplist(args):
from .query import Database
db = Database()
r = db.objects(
protocol=args.protocol,
purposes=args.purpose,
model_ids=(args.client,),
groups=args.group,
classes=args.sclass
)
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
for f in r:
output.write('%s\n' % (f.make_path(args.directory, args.extension),))
return 0
|
Dumps lists of files based on your criteria
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/driver.py#L23-L45
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Commands this database can respond to.
"""
import os
import sys
from bob.db.driver import Interface as BaseInterface
def checkfiles(args):
"""Checks existence of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects()
# go through all files, check if they are available on the filesystem
good = []
bad = []
for f in r:
if os.path.exists(f.make_path(args.directory, args.extension)):
good.append(f)
else:
bad.append(f)
# report
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
if bad:
for f in bad:
output.write('Cannot find file "%s"\n' % (f.make_path(args.directory, args.extension),))
output.write('%d files (out of %d) were not found at "%s"\n' % \
(len(bad), len(r), args.directory))
return 0
def reverse(args):
"""Returns a list of file database identifiers given the path stems"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.reverse(args.path)
for f in r: output.write('%d\n' % f.id)
if not r: return 1
return 0
def path(args):
"""Returns a list of fully formed paths or stems given some file id"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.paths(args.id, prefix=args.directory, suffix=args.extension)
for path in r: output.write('%s\n' % path)
if not r: return 1
return 0
class Interface(BaseInterface):
def name(self):
return 'atvskeystroke'
def version(self):
import pkg_resources # part of setuptools
return pkg_resources.require('xbob.db.%s' % self.name())[0].version
def files(self):
from pkg_resources import resource_filename
raw_files = ('db.sql3',)
return [resource_filename(__name__, k) for k in raw_files]
def type(self):
return 'sqlite'
def add_commands(self, parser):
from . import __doc__ as docs
subparsers = self.setup_parser(parser,
"ATVS Keystroke database", docs)
# example: get the "create" action from a submodule
from .create import add_command as create_command
create_command(subparsers)
from .query import Database
import argparse
db = Database()
# example: get the "dumplist" action from a submodule
parser = subparsers.add_parser('dumplist', help=dumplist.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('-p', '--protocol', help="if given, limits the dump to a particular subset of the data that corresponds to the given protocol.", choices=db.protocol_names() if db.is_valid() else ())
parser.add_argument('-u', '--purpose', help="if given, this value will limit the output files to those designed for the given purposes.", choices=db.purposes() if db.is_valid() else ())
parser.add_argument('-C', '--client', type=int, help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.model_ids() if db.is_valid() else ())
parser.add_argument('-g', '--group', help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.groups() if db.is_valid() else ())
parser.add_argument('-c', '--class', dest="sclass", help="if given, this value will limit the output files to those belonging to the given classes.", choices=('client', 'impostor', ''))
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=dumplist) #action
# the "checkfiles" action
parser = subparsers.add_parser('checkfiles', help=checkfiles.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=checkfiles) #action
# adds the "reverse" command
parser = subparsers.add_parser('reverse', help=reverse.__doc__)
parser.add_argument('path', nargs='+', type=str, help="one or more path stems to look up. If you provide more than one, files which cannot be reversed will be omitted from the output.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=reverse) #action
# adds the "path" command
parser = subparsers.add_parser('path', help=path.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('id', nargs='+', type=int, help="one or more file ids to look up. If you provide more than one, files which cannot be found will be omitted from the output. If you provide a single id to lookup, an error message will be printed if the id does not exist in the database. The exit status will be non-zero in such case.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=path) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/driver.py
|
checkfiles
|
python
|
def checkfiles(args):
from .query import Database
db = Database()
r = db.objects()
# go through all files, check if they are available on the filesystem
good = []
bad = []
for f in r:
if os.path.exists(f.make_path(args.directory, args.extension)):
good.append(f)
else:
bad.append(f)
# report
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
if bad:
for f in bad:
output.write('Cannot find file "%s"\n' % (f.make_path(args.directory, args.extension),))
output.write('%d files (out of %d) were not found at "%s"\n' % \
(len(bad), len(r), args.directory))
return 0
|
Checks existence of files based on your criteria
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/driver.py#L47-L76
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Commands this database can respond to.
"""
import os
import sys
from bob.db.driver import Interface as BaseInterface
def dumplist(args):
"""Dumps lists of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects(
protocol=args.protocol,
purposes=args.purpose,
model_ids=(args.client,),
groups=args.group,
classes=args.sclass
)
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
for f in r:
output.write('%s\n' % (f.make_path(args.directory, args.extension),))
return 0
def reverse(args):
"""Returns a list of file database identifiers given the path stems"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.reverse(args.path)
for f in r: output.write('%d\n' % f.id)
if not r: return 1
return 0
def path(args):
"""Returns a list of fully formed paths or stems given some file id"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.paths(args.id, prefix=args.directory, suffix=args.extension)
for path in r: output.write('%s\n' % path)
if not r: return 1
return 0
class Interface(BaseInterface):
def name(self):
return 'atvskeystroke'
def version(self):
import pkg_resources # part of setuptools
return pkg_resources.require('xbob.db.%s' % self.name())[0].version
def files(self):
from pkg_resources import resource_filename
raw_files = ('db.sql3',)
return [resource_filename(__name__, k) for k in raw_files]
def type(self):
return 'sqlite'
def add_commands(self, parser):
from . import __doc__ as docs
subparsers = self.setup_parser(parser,
"ATVS Keystroke database", docs)
# example: get the "create" action from a submodule
from .create import add_command as create_command
create_command(subparsers)
from .query import Database
import argparse
db = Database()
# example: get the "dumplist" action from a submodule
parser = subparsers.add_parser('dumplist', help=dumplist.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('-p', '--protocol', help="if given, limits the dump to a particular subset of the data that corresponds to the given protocol.", choices=db.protocol_names() if db.is_valid() else ())
parser.add_argument('-u', '--purpose', help="if given, this value will limit the output files to those designed for the given purposes.", choices=db.purposes() if db.is_valid() else ())
parser.add_argument('-C', '--client', type=int, help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.model_ids() if db.is_valid() else ())
parser.add_argument('-g', '--group', help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.groups() if db.is_valid() else ())
parser.add_argument('-c', '--class', dest="sclass", help="if given, this value will limit the output files to those belonging to the given classes.", choices=('client', 'impostor', ''))
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=dumplist) #action
# the "checkfiles" action
parser = subparsers.add_parser('checkfiles', help=checkfiles.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=checkfiles) #action
# adds the "reverse" command
parser = subparsers.add_parser('reverse', help=reverse.__doc__)
parser.add_argument('path', nargs='+', type=str, help="one or more path stems to look up. If you provide more than one, files which cannot be reversed will be omitted from the output.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=reverse) #action
# adds the "path" command
parser = subparsers.add_parser('path', help=path.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('id', nargs='+', type=int, help="one or more file ids to look up. If you provide more than one, files which cannot be found will be omitted from the output. If you provide a single id to lookup, an error message will be printed if the id does not exist in the database. The exit status will be non-zero in such case.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=path) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/driver.py
|
reverse
|
python
|
def reverse(args):
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.reverse(args.path)
for f in r: output.write('%d\n' % f.id)
if not r: return 1
return 0
|
Returns a list of file database identifiers given the path stems
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/driver.py#L78-L94
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Commands this database can respond to.
"""
import os
import sys
from bob.db.driver import Interface as BaseInterface
def dumplist(args):
"""Dumps lists of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects(
protocol=args.protocol,
purposes=args.purpose,
model_ids=(args.client,),
groups=args.group,
classes=args.sclass
)
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
for f in r:
output.write('%s\n' % (f.make_path(args.directory, args.extension),))
return 0
def checkfiles(args):
"""Checks existence of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects()
# go through all files, check if they are available on the filesystem
good = []
bad = []
for f in r:
if os.path.exists(f.make_path(args.directory, args.extension)):
good.append(f)
else:
bad.append(f)
# report
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
if bad:
for f in bad:
output.write('Cannot find file "%s"\n' % (f.make_path(args.directory, args.extension),))
output.write('%d files (out of %d) were not found at "%s"\n' % \
(len(bad), len(r), args.directory))
return 0
def path(args):
"""Returns a list of fully formed paths or stems given some file id"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.paths(args.id, prefix=args.directory, suffix=args.extension)
for path in r: output.write('%s\n' % path)
if not r: return 1
return 0
class Interface(BaseInterface):
def name(self):
return 'atvskeystroke'
def version(self):
import pkg_resources # part of setuptools
return pkg_resources.require('xbob.db.%s' % self.name())[0].version
def files(self):
from pkg_resources import resource_filename
raw_files = ('db.sql3',)
return [resource_filename(__name__, k) for k in raw_files]
def type(self):
return 'sqlite'
def add_commands(self, parser):
from . import __doc__ as docs
subparsers = self.setup_parser(parser,
"ATVS Keystroke database", docs)
# example: get the "create" action from a submodule
from .create import add_command as create_command
create_command(subparsers)
from .query import Database
import argparse
db = Database()
# example: get the "dumplist" action from a submodule
parser = subparsers.add_parser('dumplist', help=dumplist.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('-p', '--protocol', help="if given, limits the dump to a particular subset of the data that corresponds to the given protocol.", choices=db.protocol_names() if db.is_valid() else ())
parser.add_argument('-u', '--purpose', help="if given, this value will limit the output files to those designed for the given purposes.", choices=db.purposes() if db.is_valid() else ())
parser.add_argument('-C', '--client', type=int, help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.model_ids() if db.is_valid() else ())
parser.add_argument('-g', '--group', help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.groups() if db.is_valid() else ())
parser.add_argument('-c', '--class', dest="sclass", help="if given, this value will limit the output files to those belonging to the given classes.", choices=('client', 'impostor', ''))
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=dumplist) #action
# the "checkfiles" action
parser = subparsers.add_parser('checkfiles', help=checkfiles.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=checkfiles) #action
# adds the "reverse" command
parser = subparsers.add_parser('reverse', help=reverse.__doc__)
parser.add_argument('path', nargs='+', type=str, help="one or more path stems to look up. If you provide more than one, files which cannot be reversed will be omitted from the output.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=reverse) #action
# adds the "path" command
parser = subparsers.add_parser('path', help=path.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('id', nargs='+', type=int, help="one or more file ids to look up. If you provide more than one, files which cannot be found will be omitted from the output. If you provide a single id to lookup, an error message will be printed if the id does not exist in the database. The exit status will be non-zero in such case.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=path) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/driver.py
|
path
|
python
|
def path(args):
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.paths(args.id, prefix=args.directory, suffix=args.extension)
for path in r: output.write('%s\n' % path)
if not r: return 1
return 0
|
Returns a list of fully formed paths or stems given some file id
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/driver.py#L96-L112
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Commands this database can respond to.
"""
import os
import sys
from bob.db.driver import Interface as BaseInterface
def dumplist(args):
"""Dumps lists of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects(
protocol=args.protocol,
purposes=args.purpose,
model_ids=(args.client,),
groups=args.group,
classes=args.sclass
)
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
for f in r:
output.write('%s\n' % (f.make_path(args.directory, args.extension),))
return 0
def checkfiles(args):
"""Checks existence of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects()
# go through all files, check if they are available on the filesystem
good = []
bad = []
for f in r:
if os.path.exists(f.make_path(args.directory, args.extension)):
good.append(f)
else:
bad.append(f)
# report
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
if bad:
for f in bad:
output.write('Cannot find file "%s"\n' % (f.make_path(args.directory, args.extension),))
output.write('%d files (out of %d) were not found at "%s"\n' % \
(len(bad), len(r), args.directory))
return 0
def reverse(args):
"""Returns a list of file database identifiers given the path stems"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.reverse(args.path)
for f in r: output.write('%d\n' % f.id)
if not r: return 1
return 0
class Interface(BaseInterface):
def name(self):
return 'atvskeystroke'
def version(self):
import pkg_resources # part of setuptools
return pkg_resources.require('xbob.db.%s' % self.name())[0].version
def files(self):
from pkg_resources import resource_filename
raw_files = ('db.sql3',)
return [resource_filename(__name__, k) for k in raw_files]
def type(self):
return 'sqlite'
def add_commands(self, parser):
from . import __doc__ as docs
subparsers = self.setup_parser(parser,
"ATVS Keystroke database", docs)
# example: get the "create" action from a submodule
from .create import add_command as create_command
create_command(subparsers)
from .query import Database
import argparse
db = Database()
# example: get the "dumplist" action from a submodule
parser = subparsers.add_parser('dumplist', help=dumplist.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('-p', '--protocol', help="if given, limits the dump to a particular subset of the data that corresponds to the given protocol.", choices=db.protocol_names() if db.is_valid() else ())
parser.add_argument('-u', '--purpose', help="if given, this value will limit the output files to those designed for the given purposes.", choices=db.purposes() if db.is_valid() else ())
parser.add_argument('-C', '--client', type=int, help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.model_ids() if db.is_valid() else ())
parser.add_argument('-g', '--group', help="if given, this value will limit the output files to those belonging to a particular protocolar group.", choices=db.groups() if db.is_valid() else ())
parser.add_argument('-c', '--class', dest="sclass", help="if given, this value will limit the output files to those belonging to the given classes.", choices=('client', 'impostor', ''))
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=dumplist) #action
# the "checkfiles" action
parser = subparsers.add_parser('checkfiles', help=checkfiles.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=checkfiles) #action
# adds the "reverse" command
parser = subparsers.add_parser('reverse', help=reverse.__doc__)
parser.add_argument('path', nargs='+', type=str, help="one or more path stems to look up. If you provide more than one, files which cannot be reversed will be omitted from the output.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=reverse) #action
# adds the "path" command
parser = subparsers.add_parser('path', help=path.__doc__)
parser.add_argument('-d', '--directory', default='', help="if given, this path will be prepended to every entry returned.")
parser.add_argument('-e', '--extension', default='', help="if given, this extension will be appended to every entry returned.")
parser.add_argument('id', nargs='+', type=int, help="one or more file ids to look up. If you provide more than one, files which cannot be found will be omitted from the output. If you provide a single id to lookup, an error message will be printed if the id does not exist in the database. The exit status will be non-zero in such case.")
parser.add_argument('--self-test', dest="selftest", action='store_true', help=argparse.SUPPRESS)
parser.set_defaults(func=path) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/query.py
|
Database.clients
|
python
|
def clients(self, protocol=None, groups=None):
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", self.client_types())
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
q = q.order_by(Client.id)
return list(q)
|
Returns a list of :py:class:`.Client` for the specific query by the user.
Keyword Parameters:
protocol
Ignored.
groups
The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
Note that 'eval' is an alias for 'Genuine'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the clients which have the given properties.
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L68-L91
| null |
class Database(xbob.db.verification.utils.SQLiteDatabase,xbob.db.verification.utils.Database):
"""The dataset class opens and maintains a connection opened to the Database.
It provides many different ways to probe for the characteristics of the data
and for the data itself inside the database.
"""
def __init__(self, original_directory = None, original_extension = db_file_extension):
# call base class constructor
xbob.db.verification.utils.SQLiteDatabase.__init__(self, SQLITE_FILE, File)
xbob.db.verification.utils.Database.__init__(self, original_directory=original_directory, original_extension=original_extension)
def __group_replace_eval_by_genuine__(self, l):
"""Replace 'eval' by 'Genuine' and returns the new list"""
if not l: return l
elif isinstance(l, six.string_types): return self.__group_replace_eval_by_genuine__((l,))
l2 = []
for val in l:
if (val == 'eval'): l2.append('Genuine')
elif (val in Client.type_choices): l2.append(val)
return tuple(set(l2))
def groups(self, protocol=None):
"""Returns the names of all registered groups"""
return ProtocolPurpose.group_choices
def client_types(self):
"""Returns the names of the types."""
return Client.type_choices
def client_groups(self):
"""Returns the names of the groups. This is specific to this database which
does not have separate training, development and evaluation sets."""
return ProtocolPurpose.group_choices
def models(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Models correspond to Clients for this database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('Genuine')
Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.
Returns: A list containing all the models (model <-> client in BiosecurID) belonging
to the given group.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", ('Genuine',))
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
else:
q = q.filter(Client.stype.in_(['Genuine']))
q = q.order_by(Client.id)
return list(q)
def model_ids(self, protocol=None, groups=None):
"""Returns a list of model ids for the specific query by the user.
Models correspond to Clients for the XM2VTS database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('dev', 'eval', 'world')
Note that 'dev', 'eval' and 'world' are alias for 'client'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the model ids (model <-> client in XM2VTS) belonging
to the given group.
"""
return [client.subid for client in self.models(protocol, groups)]
def has_client_id(self, id):
"""Returns True if we have a client with a certain integer identifier"""
return self.query(Client).filter(Client.id==id).count() != 0
def client(self, id):
"""Returns the client object in the database given a certain id. Raises
an error if that does not exist."""
return self.query(Client).filter(Client.id==id).one()
def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
classes=None):
"""Returns a list of :py:class:`.File` for the specific query by the user.
Keyword Parameters:
protocol
One of the Biosecurid protocols ('A').
purposes
The purposes required to be retrieved ('enrol', 'probe') or a tuple
with several of them. If 'None' is given (this is the default), it is
considered the same as a tuple with all possible values. This field is
ignored for the data from the "world" group.
model_ids
Only retrieves the files for the provided list of model ids (claimed
client id). The model ids are string. If 'None' is given (this is
the default), no filter over the model_ids is performed.
groups
One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
If 'None' is given (this is the default), it is considered the same as a
tuple with all possible values.
classes
The classes (types of accesses) to be retrieved ('client', 'impostor')
or a tuple with several of them. If 'None' is given (this is the
default), it is considered the same as a tuple with all possible values.
Returns: A list of :py:class:`.File` objects.
"""
#groups = self.__group_replace_alias_clients__(groups)
protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
groups = self.check_parameters_for_validity(groups, "group", self.groups())
classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))
import collections
if(model_ids is None):
model_ids = ()
elif(not isinstance(model_ids,collections.Iterable)):
model_ids = (model_ids,)
# Now query the database
retval = []
if ('eval' in groups):
if('enrol' in purposes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('probe' in purposes):
if('client' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('impostor' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Impostor'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
return list(set(retval)) # To remove duplicates
def protocol_names(self):
"""Returns all registered protocol names"""
l = self.protocols()
retval = [str(k.name) for k in l]
return retval
def protocols(self):
"""Returns all registered protocols"""
return list(self.query(Protocol))
def has_protocol(self, name):
"""Tells if a certain protocol is available"""
return self.query(Protocol).filter(Protocol.name==name).count() != 0
def protocol(self, name):
"""Returns the protocol object in the database given a certain name. Raises
an error if that does not exist."""
return self.query(Protocol).filter(Protocol.name==name).one()
def protocol_purposes(self):
"""Returns all registered protocol purposes"""
return list(self.query(ProtocolPurpose))
def purposes(self):
"""Returns the list of allowed purposes"""
return ProtocolPurpose.purpose_choices
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/query.py
|
Database.model_ids
|
python
|
def model_ids(self, protocol=None, groups=None):
return [client.subid for client in self.models(protocol, groups)]
|
Returns a list of model ids for the specific query by the user.
Models correspond to Clients for the XM2VTS database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('dev', 'eval', 'world')
Note that 'dev', 'eval' and 'world' are alias for 'client'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the model ids (model <-> client in XM2VTS) belonging
to the given group.
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L122-L140
| null |
class Database(xbob.db.verification.utils.SQLiteDatabase,xbob.db.verification.utils.Database):
"""The dataset class opens and maintains a connection opened to the Database.
It provides many different ways to probe for the characteristics of the data
and for the data itself inside the database.
"""
def __init__(self, original_directory = None, original_extension = db_file_extension):
# call base class constructor
xbob.db.verification.utils.SQLiteDatabase.__init__(self, SQLITE_FILE, File)
xbob.db.verification.utils.Database.__init__(self, original_directory=original_directory, original_extension=original_extension)
def __group_replace_eval_by_genuine__(self, l):
"""Replace 'eval' by 'Genuine' and returns the new list"""
if not l: return l
elif isinstance(l, six.string_types): return self.__group_replace_eval_by_genuine__((l,))
l2 = []
for val in l:
if (val == 'eval'): l2.append('Genuine')
elif (val in Client.type_choices): l2.append(val)
return tuple(set(l2))
def groups(self, protocol=None):
"""Returns the names of all registered groups"""
return ProtocolPurpose.group_choices
def client_types(self):
"""Returns the names of the types."""
return Client.type_choices
def client_groups(self):
"""Returns the names of the groups. This is specific to this database which
does not have separate training, development and evaluation sets."""
return ProtocolPurpose.group_choices
def clients(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Keyword Parameters:
protocol
Ignored.
groups
The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
Note that 'eval' is an alias for 'Genuine'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the clients which have the given properties.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", self.client_types())
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
q = q.order_by(Client.id)
return list(q)
def models(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Models correspond to Clients for this database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('Genuine')
Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.
Returns: A list containing all the models (model <-> client in BiosecurID) belonging
to the given group.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", ('Genuine',))
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
else:
q = q.filter(Client.stype.in_(['Genuine']))
q = q.order_by(Client.id)
return list(q)
def has_client_id(self, id):
"""Returns True if we have a client with a certain integer identifier"""
return self.query(Client).filter(Client.id==id).count() != 0
def client(self, id):
"""Returns the client object in the database given a certain id. Raises
an error if that does not exist."""
return self.query(Client).filter(Client.id==id).one()
def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
classes=None):
"""Returns a list of :py:class:`.File` for the specific query by the user.
Keyword Parameters:
protocol
One of the Biosecurid protocols ('A').
purposes
The purposes required to be retrieved ('enrol', 'probe') or a tuple
with several of them. If 'None' is given (this is the default), it is
considered the same as a tuple with all possible values. This field is
ignored for the data from the "world" group.
model_ids
Only retrieves the files for the provided list of model ids (claimed
client id). The model ids are string. If 'None' is given (this is
the default), no filter over the model_ids is performed.
groups
One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
If 'None' is given (this is the default), it is considered the same as a
tuple with all possible values.
classes
The classes (types of accesses) to be retrieved ('client', 'impostor')
or a tuple with several of them. If 'None' is given (this is the
default), it is considered the same as a tuple with all possible values.
Returns: A list of :py:class:`.File` objects.
"""
#groups = self.__group_replace_alias_clients__(groups)
protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
groups = self.check_parameters_for_validity(groups, "group", self.groups())
classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))
import collections
if(model_ids is None):
model_ids = ()
elif(not isinstance(model_ids,collections.Iterable)):
model_ids = (model_ids,)
# Now query the database
retval = []
if ('eval' in groups):
if('enrol' in purposes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('probe' in purposes):
if('client' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('impostor' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Impostor'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
return list(set(retval)) # To remove duplicates
def protocol_names(self):
"""Returns all registered protocol names"""
l = self.protocols()
retval = [str(k.name) for k in l]
return retval
def protocols(self):
"""Returns all registered protocols"""
return list(self.query(Protocol))
def has_protocol(self, name):
"""Tells if a certain protocol is available"""
return self.query(Protocol).filter(Protocol.name==name).count() != 0
def protocol(self, name):
"""Returns the protocol object in the database given a certain name. Raises
an error if that does not exist."""
return self.query(Protocol).filter(Protocol.name==name).one()
def protocol_purposes(self):
"""Returns all registered protocol purposes"""
return list(self.query(ProtocolPurpose))
def purposes(self):
"""Returns the list of allowed purposes"""
return ProtocolPurpose.purpose_choices
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/query.py
|
Database.has_client_id
|
python
|
def has_client_id(self, id):
return self.query(Client).filter(Client.id==id).count() != 0
|
Returns True if we have a client with a certain integer identifier
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L142-L145
| null |
class Database(xbob.db.verification.utils.SQLiteDatabase,xbob.db.verification.utils.Database):
"""The dataset class opens and maintains a connection opened to the Database.
It provides many different ways to probe for the characteristics of the data
and for the data itself inside the database.
"""
def __init__(self, original_directory = None, original_extension = db_file_extension):
# call base class constructor
xbob.db.verification.utils.SQLiteDatabase.__init__(self, SQLITE_FILE, File)
xbob.db.verification.utils.Database.__init__(self, original_directory=original_directory, original_extension=original_extension)
def __group_replace_eval_by_genuine__(self, l):
"""Replace 'eval' by 'Genuine' and returns the new list"""
if not l: return l
elif isinstance(l, six.string_types): return self.__group_replace_eval_by_genuine__((l,))
l2 = []
for val in l:
if (val == 'eval'): l2.append('Genuine')
elif (val in Client.type_choices): l2.append(val)
return tuple(set(l2))
def groups(self, protocol=None):
"""Returns the names of all registered groups"""
return ProtocolPurpose.group_choices
def client_types(self):
"""Returns the names of the types."""
return Client.type_choices
def client_groups(self):
"""Returns the names of the groups. This is specific to this database which
does not have separate training, development and evaluation sets."""
return ProtocolPurpose.group_choices
def clients(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Keyword Parameters:
protocol
Ignored.
groups
The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
Note that 'eval' is an alias for 'Genuine'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the clients which have the given properties.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", self.client_types())
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
q = q.order_by(Client.id)
return list(q)
def models(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Models correspond to Clients for this database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('Genuine')
Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.
Returns: A list containing all the models (model <-> client in BiosecurID) belonging
to the given group.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", ('Genuine',))
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
else:
q = q.filter(Client.stype.in_(['Genuine']))
q = q.order_by(Client.id)
return list(q)
def model_ids(self, protocol=None, groups=None):
"""Returns a list of model ids for the specific query by the user.
Models correspond to Clients for the XM2VTS database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('dev', 'eval', 'world')
Note that 'dev', 'eval' and 'world' are alias for 'client'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the model ids (model <-> client in XM2VTS) belonging
to the given group.
"""
return [client.subid for client in self.models(protocol, groups)]
def client(self, id):
"""Returns the client object in the database given a certain id. Raises
an error if that does not exist."""
return self.query(Client).filter(Client.id==id).one()
def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
classes=None):
"""Returns a list of :py:class:`.File` for the specific query by the user.
Keyword Parameters:
protocol
One of the Biosecurid protocols ('A').
purposes
The purposes required to be retrieved ('enrol', 'probe') or a tuple
with several of them. If 'None' is given (this is the default), it is
considered the same as a tuple with all possible values. This field is
ignored for the data from the "world" group.
model_ids
Only retrieves the files for the provided list of model ids (claimed
client id). The model ids are string. If 'None' is given (this is
the default), no filter over the model_ids is performed.
groups
One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
If 'None' is given (this is the default), it is considered the same as a
tuple with all possible values.
classes
The classes (types of accesses) to be retrieved ('client', 'impostor')
or a tuple with several of them. If 'None' is given (this is the
default), it is considered the same as a tuple with all possible values.
Returns: A list of :py:class:`.File` objects.
"""
#groups = self.__group_replace_alias_clients__(groups)
protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
groups = self.check_parameters_for_validity(groups, "group", self.groups())
classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))
import collections
if(model_ids is None):
model_ids = ()
elif(not isinstance(model_ids,collections.Iterable)):
model_ids = (model_ids,)
# Now query the database
retval = []
if ('eval' in groups):
if('enrol' in purposes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('probe' in purposes):
if('client' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('impostor' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Impostor'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
return list(set(retval)) # To remove duplicates
def protocol_names(self):
"""Returns all registered protocol names"""
l = self.protocols()
retval = [str(k.name) for k in l]
return retval
def protocols(self):
"""Returns all registered protocols"""
return list(self.query(Protocol))
def has_protocol(self, name):
"""Tells if a certain protocol is available"""
return self.query(Protocol).filter(Protocol.name==name).count() != 0
def protocol(self, name):
"""Returns the protocol object in the database given a certain name. Raises
an error if that does not exist."""
return self.query(Protocol).filter(Protocol.name==name).one()
def protocol_purposes(self):
"""Returns all registered protocol purposes"""
return list(self.query(ProtocolPurpose))
def purposes(self):
"""Returns the list of allowed purposes"""
return ProtocolPurpose.purpose_choices
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/query.py
|
Database.client
|
python
|
def client(self, id):
return self.query(Client).filter(Client.id==id).one()
|
Returns the client object in the database given a certain id. Raises
an error if that does not exist.
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L147-L151
| null |
class Database(xbob.db.verification.utils.SQLiteDatabase,xbob.db.verification.utils.Database):
"""The dataset class opens and maintains a connection opened to the Database.
It provides many different ways to probe for the characteristics of the data
and for the data itself inside the database.
"""
def __init__(self, original_directory = None, original_extension = db_file_extension):
# call base class constructor
xbob.db.verification.utils.SQLiteDatabase.__init__(self, SQLITE_FILE, File)
xbob.db.verification.utils.Database.__init__(self, original_directory=original_directory, original_extension=original_extension)
def __group_replace_eval_by_genuine__(self, l):
"""Replace 'eval' by 'Genuine' and returns the new list"""
if not l: return l
elif isinstance(l, six.string_types): return self.__group_replace_eval_by_genuine__((l,))
l2 = []
for val in l:
if (val == 'eval'): l2.append('Genuine')
elif (val in Client.type_choices): l2.append(val)
return tuple(set(l2))
def groups(self, protocol=None):
"""Returns the names of all registered groups"""
return ProtocolPurpose.group_choices
def client_types(self):
"""Returns the names of the types."""
return Client.type_choices
def client_groups(self):
"""Returns the names of the groups. This is specific to this database which
does not have separate training, development and evaluation sets."""
return ProtocolPurpose.group_choices
def clients(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Keyword Parameters:
protocol
Ignored.
groups
The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
Note that 'eval' is an alias for 'Genuine'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the clients which have the given properties.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", self.client_types())
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
q = q.order_by(Client.id)
return list(q)
def models(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Models correspond to Clients for this database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('Genuine')
Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.
Returns: A list containing all the models (model <-> client in BiosecurID) belonging
to the given group.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", ('Genuine',))
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
else:
q = q.filter(Client.stype.in_(['Genuine']))
q = q.order_by(Client.id)
return list(q)
def model_ids(self, protocol=None, groups=None):
"""Returns a list of model ids for the specific query by the user.
Models correspond to Clients for the XM2VTS database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('dev', 'eval', 'world')
Note that 'dev', 'eval' and 'world' are alias for 'client'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the model ids (model <-> client in XM2VTS) belonging
to the given group.
"""
return [client.subid for client in self.models(protocol, groups)]
def has_client_id(self, id):
"""Returns True if we have a client with a certain integer identifier"""
return self.query(Client).filter(Client.id==id).count() != 0
def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
classes=None):
"""Returns a list of :py:class:`.File` for the specific query by the user.
Keyword Parameters:
protocol
One of the Biosecurid protocols ('A').
purposes
The purposes required to be retrieved ('enrol', 'probe') or a tuple
with several of them. If 'None' is given (this is the default), it is
considered the same as a tuple with all possible values. This field is
ignored for the data from the "world" group.
model_ids
Only retrieves the files for the provided list of model ids (claimed
client id). The model ids are string. If 'None' is given (this is
the default), no filter over the model_ids is performed.
groups
One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
If 'None' is given (this is the default), it is considered the same as a
tuple with all possible values.
classes
The classes (types of accesses) to be retrieved ('client', 'impostor')
or a tuple with several of them. If 'None' is given (this is the
default), it is considered the same as a tuple with all possible values.
Returns: A list of :py:class:`.File` objects.
"""
#groups = self.__group_replace_alias_clients__(groups)
protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
groups = self.check_parameters_for_validity(groups, "group", self.groups())
classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))
import collections
if(model_ids is None):
model_ids = ()
elif(not isinstance(model_ids,collections.Iterable)):
model_ids = (model_ids,)
# Now query the database
retval = []
if ('eval' in groups):
if('enrol' in purposes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('probe' in purposes):
if('client' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('impostor' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Impostor'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
return list(set(retval)) # To remove duplicates
def protocol_names(self):
"""Returns all registered protocol names"""
l = self.protocols()
retval = [str(k.name) for k in l]
return retval
def protocols(self):
"""Returns all registered protocols"""
return list(self.query(Protocol))
def has_protocol(self, name):
"""Tells if a certain protocol is available"""
return self.query(Protocol).filter(Protocol.name==name).count() != 0
def protocol(self, name):
"""Returns the protocol object in the database given a certain name. Raises
an error if that does not exist."""
return self.query(Protocol).filter(Protocol.name==name).one()
def protocol_purposes(self):
"""Returns all registered protocol purposes"""
return list(self.query(ProtocolPurpose))
def purposes(self):
"""Returns the list of allowed purposes"""
return ProtocolPurpose.purpose_choices
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/query.py
|
Database.objects
|
python
|
def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
classes=None):
#groups = self.__group_replace_alias_clients__(groups)
protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
groups = self.check_parameters_for_validity(groups, "group", self.groups())
classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))
import collections
if(model_ids is None):
model_ids = ()
elif(not isinstance(model_ids,collections.Iterable)):
model_ids = (model_ids,)
# Now query the database
retval = []
if ('eval' in groups):
if('enrol' in purposes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('probe' in purposes):
if('client' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Genuine'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
if('impostor' in classes):
q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
filter(Client.stype.in_(['Impostor'])).\
filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
if model_ids:
q = q.filter(Client.subid.in_(model_ids))
q = q.order_by(File.client_id, File.session_id, File.shot_id)
retval += list(q)
return list(set(retval))
|
Returns a list of :py:class:`.File` for the specific query by the user.
Keyword Parameters:
protocol
One of the Biosecurid protocols ('A').
purposes
The purposes required to be retrieved ('enrol', 'probe') or a tuple
with several of them. If 'None' is given (this is the default), it is
considered the same as a tuple with all possible values. This field is
ignored for the data from the "world" group.
model_ids
Only retrieves the files for the provided list of model ids (claimed
client id). The model ids are string. If 'None' is given (this is
the default), no filter over the model_ids is performed.
groups
One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
If 'None' is given (this is the default), it is considered the same as a
tuple with all possible values.
classes
The classes (types of accesses) to be retrieved ('client', 'impostor')
or a tuple with several of them. If 'None' is given (this is the
default), it is considered the same as a tuple with all possible values.
Returns: A list of :py:class:`.File` objects.
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L153-L230
| null |
class Database(xbob.db.verification.utils.SQLiteDatabase,xbob.db.verification.utils.Database):
"""The dataset class opens and maintains a connection opened to the Database.
It provides many different ways to probe for the characteristics of the data
and for the data itself inside the database.
"""
def __init__(self, original_directory = None, original_extension = db_file_extension):
# call base class constructor
xbob.db.verification.utils.SQLiteDatabase.__init__(self, SQLITE_FILE, File)
xbob.db.verification.utils.Database.__init__(self, original_directory=original_directory, original_extension=original_extension)
def __group_replace_eval_by_genuine__(self, l):
"""Replace 'eval' by 'Genuine' and returns the new list"""
if not l: return l
elif isinstance(l, six.string_types): return self.__group_replace_eval_by_genuine__((l,))
l2 = []
for val in l:
if (val == 'eval'): l2.append('Genuine')
elif (val in Client.type_choices): l2.append(val)
return tuple(set(l2))
def groups(self, protocol=None):
"""Returns the names of all registered groups"""
return ProtocolPurpose.group_choices
def client_types(self):
"""Returns the names of the types."""
return Client.type_choices
def client_groups(self):
"""Returns the names of the groups. This is specific to this database which
does not have separate training, development and evaluation sets."""
return ProtocolPurpose.group_choices
def clients(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Keyword Parameters:
protocol
Ignored.
groups
The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
Note that 'eval' is an alias for 'Genuine'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the clients which have the given properties.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", self.client_types())
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
q = q.order_by(Client.id)
return list(q)
def models(self, protocol=None, groups=None):
"""Returns a list of :py:class:`.Client` for the specific query by the user.
Models correspond to Clients for this database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('Genuine')
Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.
Returns: A list containing all the models (model <-> client in BiosecurID) belonging
to the given group.
"""
groups = self.__group_replace_eval_by_genuine__(groups)
groups = self.check_parameters_for_validity(groups, "group", ('Genuine',))
# List of the clients
q = self.query(Client)
if groups:
q = q.filter(Client.stype.in_(groups))
else:
q = q.filter(Client.stype.in_(['Genuine']))
q = q.order_by(Client.id)
return list(q)
def model_ids(self, protocol=None, groups=None):
"""Returns a list of model ids for the specific query by the user.
Models correspond to Clients for the XM2VTS database (At most one model per identity).
Keyword Parameters:
protocol
Ignored.
groups
The groups to which the subjects attached to the models belong ('dev', 'eval', 'world')
Note that 'dev', 'eval' and 'world' are alias for 'client'.
If no groups are specified, then both clients are impostors are listed.
Returns: A list containing all the model ids (model <-> client in XM2VTS) belonging
to the given group.
"""
return [client.subid for client in self.models(protocol, groups)]
def has_client_id(self, id):
"""Returns True if we have a client with a certain integer identifier"""
return self.query(Client).filter(Client.id==id).count() != 0
def client(self, id):
"""Returns the client object in the database given a certain id. Raises
an error if that does not exist."""
return self.query(Client).filter(Client.id==id).one()
# To remove duplicates
def protocol_names(self):
"""Returns all registered protocol names"""
l = self.protocols()
retval = [str(k.name) for k in l]
return retval
def protocols(self):
"""Returns all registered protocols"""
return list(self.query(Protocol))
def has_protocol(self, name):
"""Tells if a certain protocol is available"""
return self.query(Protocol).filter(Protocol.name==name).count() != 0
def protocol(self, name):
"""Returns the protocol object in the database given a certain name. Raises
an error if that does not exist."""
return self.query(Protocol).filter(Protocol.name==name).one()
def protocol_purposes(self):
"""Returns all registered protocol purposes"""
return list(self.query(ProtocolPurpose))
def purposes(self):
  """Returns the list of allowed purposes"""
  # the closed set of purposes is declared on the ProtocolPurpose model
  return ProtocolPurpose.purpose_choices
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/query.py
|
Database.protocol_names
|
python
|
def protocol_names(self):
  """Returns all registered protocol names"""
  return [str(proto.name) for proto in self.protocols()]
|
Returns all registered protocol names
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L232-L237
| null |
class Database(xbob.db.verification.utils.SQLiteDatabase,xbob.db.verification.utils.Database):
  """The dataset class opens and maintains a connection opened to the Database.

  It provides many different ways to probe for the characteristics of the data
  and for the data itself inside the database.
  """

  def __init__(self, original_directory = None, original_extension = db_file_extension):
    # call base class constructor
    xbob.db.verification.utils.SQLiteDatabase.__init__(self, SQLITE_FILE, File)
    xbob.db.verification.utils.Database.__init__(self, original_directory=original_directory, original_extension=original_extension)

  def __group_replace_eval_by_genuine__(self, l):
    """Replace 'eval' by 'Genuine' and returns the new list"""
    # A single string is promoted to a 1-tuple and handled recursively;
    # entries that are neither 'eval' nor a known client type are dropped.
    if not l: return l
    elif isinstance(l, six.string_types): return self.__group_replace_eval_by_genuine__((l,))
    l2 = []
    for val in l:
      if (val == 'eval'): l2.append('Genuine')
      elif (val in Client.type_choices): l2.append(val)
    # set() removes duplicates but does not preserve the input order
    return tuple(set(l2))

  def groups(self, protocol=None):
    """Returns the names of all registered groups"""
    return ProtocolPurpose.group_choices

  def client_types(self):
    """Returns the names of the types."""
    return Client.type_choices

  def client_groups(self):
    """Returns the names of the groups. This is specific to this database which
    does not have separate training, development and evaluation sets."""
    return ProtocolPurpose.group_choices

  def clients(self, protocol=None, groups=None):
    """Returns a list of :py:class:`.Client` for the specific query by the user.

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
      Note that 'eval' is an alias for 'Genuine'.
      If no groups are specified, then both clients and impostors are listed.

    Returns: A list containing all the clients which have the given properties.
    """

    groups = self.__group_replace_eval_by_genuine__(groups)
    groups = self.check_parameters_for_validity(groups, "group", self.client_types())
    # List of the clients
    q = self.query(Client)
    if groups:
      q = q.filter(Client.stype.in_(groups))
    q = q.order_by(Client.id)
    return list(q)

  def models(self, protocol=None, groups=None):
    """Returns a list of :py:class:`.Client` for the specific query by the user.

    Models correspond to Clients for this database (At most one model per identity).

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups to which the subjects attached to the models belong ('Genuine')
      Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.

    Returns: A list containing all the models (model <-> client in this database)
    belonging to the given group.
    """

    groups = self.__group_replace_eval_by_genuine__(groups)
    groups = self.check_parameters_for_validity(groups, "group", ('Genuine',))
    # List of the clients
    q = self.query(Client)
    if groups:
      q = q.filter(Client.stype.in_(groups))
    else:
      # with an empty selection, models are still restricted to genuine users
      q = q.filter(Client.stype.in_(['Genuine']))
    q = q.order_by(Client.id)
    return list(q)

  def model_ids(self, protocol=None, groups=None):
    """Returns a list of model ids for the specific query by the user.

    Models correspond to Clients for this database (At most one model per identity).

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups to which the subjects attached to the models belong.
      Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.

    Returns: A list containing all the model ids (model <-> client in this
    database) belonging to the given group.
    """

    return [client.subid for client in self.models(protocol, groups)]

  def has_client_id(self, id):
    """Returns True if we have a client with a certain integer identifier"""
    return self.query(Client).filter(Client.id==id).count() != 0

  def client(self, id):
    """Returns the client object in the database given a certain id. Raises
    an error if that does not exist."""
    return self.query(Client).filter(Client.id==id).one()

  def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
      classes=None):
    """Returns a list of :py:class:`.File` for the specific query by the user.

    Keyword Parameters:

    protocol
      One of the Biosecurid protocols ('A').

    purposes
      The purposes required to be retrieved ('enrol', 'probe') or a tuple
      with several of them. If 'None' is given (this is the default), it is
      considered the same as a tuple with all possible values. This field is
      ignored for the data from the "world" group.

    model_ids
      Only retrieves the files for the provided list of model ids (claimed
      client id). The model ids are string. If 'None' is given (this is
      the default), no filter over the model_ids is performed.

    groups
      One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
      If 'None' is given (this is the default), it is considered the same as a
      tuple with all possible values.

    classes
      The classes (types of accesses) to be retrieved ('client', 'impostor')
      or a tuple with several of them. If 'None' is given (this is the
      default), it is considered the same as a tuple with all possible values.

    Returns: A list of :py:class:`.File` objects.
    """

    #groups = self.__group_replace_alias_clients__(groups)
    protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
    purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
    groups = self.check_parameters_for_validity(groups, "group", self.groups())
    classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))

    import collections
    # NOTE(review): collections.Iterable was removed in Python 3.10;
    # collections.abc.Iterable is the portable spelling.
    if(model_ids is None):
      model_ids = ()
    elif(not isinstance(model_ids,collections.Iterable)):
      model_ids = (model_ids,)

    # Now query the database
    retval = []
    if ('eval' in groups):
      if('enrol' in purposes):
        # enrolment templates come from genuine users only
        q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
            filter(Client.stype.in_(['Genuine'])).\
            filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
        if model_ids:
          q = q.filter(Client.subid.in_(model_ids))
        q = q.order_by(File.client_id, File.session_id, File.shot_id)
        retval += list(q)

      if('probe' in purposes):
        if('client' in classes):
          # genuine probe accesses
          q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
              filter(Client.stype.in_(['Genuine'])).\
              filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
          if model_ids:
            q = q.filter(Client.subid.in_(model_ids))
          q = q.order_by(File.client_id, File.session_id, File.shot_id)
          retval += list(q)

        if('impostor' in classes):
          # impostor probe accesses
          q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
              filter(Client.stype.in_(['Impostor'])).\
              filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
          if model_ids:
            q = q.filter(Client.subid.in_(model_ids))
          q = q.order_by(File.client_id, File.session_id, File.shot_id)
          retval += list(q)

    return list(set(retval)) # To remove duplicates

  def protocols(self):
    """Returns all registered protocols"""
    return list(self.query(Protocol))

  def has_protocol(self, name):
    """Tells if a certain protocol is available"""
    return self.query(Protocol).filter(Protocol.name==name).count() != 0

  def protocol(self, name):
    """Returns the protocol object in the database given a certain name. Raises
    an error if that does not exist."""
    return self.query(Protocol).filter(Protocol.name==name).one()

  def protocol_purposes(self):
    """Returns all registered protocol purposes"""
    return list(self.query(ProtocolPurpose))

  def purposes(self):
    """Returns the list of allowed purposes"""
    return ProtocolPurpose.purpose_choices
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/query.py
|
Database.has_protocol
|
python
|
def has_protocol(self, name):
  """Tells if a certain protocol is available"""
  matching = self.query(Protocol).filter(Protocol.name == name)
  return matching.count() != 0
|
Tells if a certain protocol is available
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L244-L247
| null |
class Database(xbob.db.verification.utils.SQLiteDatabase,xbob.db.verification.utils.Database):
  """The dataset class opens and maintains a connection opened to the Database.

  It provides many different ways to probe for the characteristics of the data
  and for the data itself inside the database.
  """

  def __init__(self, original_directory = None, original_extension = db_file_extension):
    # call base class constructor
    xbob.db.verification.utils.SQLiteDatabase.__init__(self, SQLITE_FILE, File)
    xbob.db.verification.utils.Database.__init__(self, original_directory=original_directory, original_extension=original_extension)

  def __group_replace_eval_by_genuine__(self, l):
    """Replace 'eval' by 'Genuine' and returns the new list"""
    # A single string is promoted to a 1-tuple and handled recursively;
    # entries that are neither 'eval' nor a known client type are dropped.
    if not l: return l
    elif isinstance(l, six.string_types): return self.__group_replace_eval_by_genuine__((l,))
    l2 = []
    for val in l:
      if (val == 'eval'): l2.append('Genuine')
      elif (val in Client.type_choices): l2.append(val)
    # set() removes duplicates but does not preserve the input order
    return tuple(set(l2))

  def groups(self, protocol=None):
    """Returns the names of all registered groups"""
    return ProtocolPurpose.group_choices

  def client_types(self):
    """Returns the names of the types."""
    return Client.type_choices

  def client_groups(self):
    """Returns the names of the groups. This is specific to this database which
    does not have separate training, development and evaluation sets."""
    return ProtocolPurpose.group_choices

  def clients(self, protocol=None, groups=None):
    """Returns a list of :py:class:`.Client` for the specific query by the user.

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
      Note that 'eval' is an alias for 'Genuine'.
      If no groups are specified, then both clients and impostors are listed.

    Returns: A list containing all the clients which have the given properties.
    """

    groups = self.__group_replace_eval_by_genuine__(groups)
    groups = self.check_parameters_for_validity(groups, "group", self.client_types())
    # List of the clients
    q = self.query(Client)
    if groups:
      q = q.filter(Client.stype.in_(groups))
    q = q.order_by(Client.id)
    return list(q)

  def models(self, protocol=None, groups=None):
    """Returns a list of :py:class:`.Client` for the specific query by the user.

    Models correspond to Clients for this database (At most one model per identity).

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups to which the subjects attached to the models belong ('Genuine')
      Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.

    Returns: A list containing all the models (model <-> client in this database)
    belonging to the given group.
    """

    groups = self.__group_replace_eval_by_genuine__(groups)
    groups = self.check_parameters_for_validity(groups, "group", ('Genuine',))
    # List of the clients
    q = self.query(Client)
    if groups:
      q = q.filter(Client.stype.in_(groups))
    else:
      # with an empty selection, models are still restricted to genuine users
      q = q.filter(Client.stype.in_(['Genuine']))
    q = q.order_by(Client.id)
    return list(q)

  def model_ids(self, protocol=None, groups=None):
    """Returns a list of model ids for the specific query by the user.

    Models correspond to Clients for this database (At most one model per identity).

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups to which the subjects attached to the models belong.
      Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.

    Returns: A list containing all the model ids (model <-> client in this
    database) belonging to the given group.
    """

    return [client.subid for client in self.models(protocol, groups)]

  def has_client_id(self, id):
    """Returns True if we have a client with a certain integer identifier"""
    return self.query(Client).filter(Client.id==id).count() != 0

  def client(self, id):
    """Returns the client object in the database given a certain id. Raises
    an error if that does not exist."""
    return self.query(Client).filter(Client.id==id).one()

  def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
      classes=None):
    """Returns a list of :py:class:`.File` for the specific query by the user.

    Keyword Parameters:

    protocol
      One of the Biosecurid protocols ('A').

    purposes
      The purposes required to be retrieved ('enrol', 'probe') or a tuple
      with several of them. If 'None' is given (this is the default), it is
      considered the same as a tuple with all possible values. This field is
      ignored for the data from the "world" group.

    model_ids
      Only retrieves the files for the provided list of model ids (claimed
      client id). The model ids are string. If 'None' is given (this is
      the default), no filter over the model_ids is performed.

    groups
      One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
      If 'None' is given (this is the default), it is considered the same as a
      tuple with all possible values.

    classes
      The classes (types of accesses) to be retrieved ('client', 'impostor')
      or a tuple with several of them. If 'None' is given (this is the
      default), it is considered the same as a tuple with all possible values.

    Returns: A list of :py:class:`.File` objects.
    """

    #groups = self.__group_replace_alias_clients__(groups)
    protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
    purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
    groups = self.check_parameters_for_validity(groups, "group", self.groups())
    classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))

    import collections
    # NOTE(review): collections.Iterable was removed in Python 3.10;
    # collections.abc.Iterable is the portable spelling.
    if(model_ids is None):
      model_ids = ()
    elif(not isinstance(model_ids,collections.Iterable)):
      model_ids = (model_ids,)

    # Now query the database
    retval = []
    if ('eval' in groups):
      if('enrol' in purposes):
        # enrolment templates come from genuine users only
        q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
            filter(Client.stype.in_(['Genuine'])).\
            filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
        if model_ids:
          q = q.filter(Client.subid.in_(model_ids))
        q = q.order_by(File.client_id, File.session_id, File.shot_id)
        retval += list(q)

      if('probe' in purposes):
        if('client' in classes):
          # genuine probe accesses
          q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
              filter(Client.stype.in_(['Genuine'])).\
              filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
          if model_ids:
            q = q.filter(Client.subid.in_(model_ids))
          q = q.order_by(File.client_id, File.session_id, File.shot_id)
          retval += list(q)

        if('impostor' in classes):
          # impostor probe accesses
          q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
              filter(Client.stype.in_(['Impostor'])).\
              filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
          if model_ids:
            q = q.filter(Client.subid.in_(model_ids))
          q = q.order_by(File.client_id, File.session_id, File.shot_id)
          retval += list(q)

    return list(set(retval)) # To remove duplicates

  def protocol_names(self):
    """Returns all registered protocol names"""
    l = self.protocols()
    retval = [str(k.name) for k in l]
    return retval

  def protocols(self):
    """Returns all registered protocols"""
    return list(self.query(Protocol))

  def protocol(self, name):
    """Returns the protocol object in the database given a certain name. Raises
    an error if that does not exist."""
    return self.query(Protocol).filter(Protocol.name==name).one()

  def protocol_purposes(self):
    """Returns all registered protocol purposes"""
    return list(self.query(ProtocolPurpose))

  def purposes(self):
    """Returns the list of allowed purposes"""
    return ProtocolPurpose.purpose_choices
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/query.py
|
Database.protocol
|
python
|
def protocol(self, name):
  """Returns the protocol object in the database given a certain name. Raises
  an error if that does not exist."""
  matching = self.query(Protocol).filter(Protocol.name == name)
  return matching.one()
|
Returns the protocol object in the database given a certain name. Raises
an error if that does not exist.
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/query.py#L249-L253
| null |
class Database(xbob.db.verification.utils.SQLiteDatabase,xbob.db.verification.utils.Database):
  """The dataset class opens and maintains a connection opened to the Database.

  It provides many different ways to probe for the characteristics of the data
  and for the data itself inside the database.
  """

  def __init__(self, original_directory = None, original_extension = db_file_extension):
    # call base class constructor
    xbob.db.verification.utils.SQLiteDatabase.__init__(self, SQLITE_FILE, File)
    xbob.db.verification.utils.Database.__init__(self, original_directory=original_directory, original_extension=original_extension)

  def __group_replace_eval_by_genuine__(self, l):
    """Replace 'eval' by 'Genuine' and returns the new list"""
    # A single string is promoted to a 1-tuple and handled recursively;
    # entries that are neither 'eval' nor a known client type are dropped.
    if not l: return l
    elif isinstance(l, six.string_types): return self.__group_replace_eval_by_genuine__((l,))
    l2 = []
    for val in l:
      if (val == 'eval'): l2.append('Genuine')
      elif (val in Client.type_choices): l2.append(val)
    # set() removes duplicates but does not preserve the input order
    return tuple(set(l2))

  def groups(self, protocol=None):
    """Returns the names of all registered groups"""
    return ProtocolPurpose.group_choices

  def client_types(self):
    """Returns the names of the types."""
    return Client.type_choices

  def client_groups(self):
    """Returns the names of the groups. This is specific to this database which
    does not have separate training, development and evaluation sets."""
    return ProtocolPurpose.group_choices

  def clients(self, protocol=None, groups=None):
    """Returns a list of :py:class:`.Client` for the specific query by the user.

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups (types) to which the clients belong either from ('Genuine', 'Impostor')
      Note that 'eval' is an alias for 'Genuine'.
      If no groups are specified, then both clients and impostors are listed.

    Returns: A list containing all the clients which have the given properties.
    """

    groups = self.__group_replace_eval_by_genuine__(groups)
    groups = self.check_parameters_for_validity(groups, "group", self.client_types())
    # List of the clients
    q = self.query(Client)
    if groups:
      q = q.filter(Client.stype.in_(groups))
    q = q.order_by(Client.id)
    return list(q)

  def models(self, protocol=None, groups=None):
    """Returns a list of :py:class:`.Client` for the specific query by the user.

    Models correspond to Clients for this database (At most one model per identity).

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups to which the subjects attached to the models belong ('Genuine')
      Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.

    Returns: A list containing all the models (model <-> client in this database)
    belonging to the given group.
    """

    groups = self.__group_replace_eval_by_genuine__(groups)
    groups = self.check_parameters_for_validity(groups, "group", ('Genuine',))
    # List of the clients
    q = self.query(Client)
    if groups:
      q = q.filter(Client.stype.in_(groups))
    else:
      # with an empty selection, models are still restricted to genuine users
      q = q.filter(Client.stype.in_(['Genuine']))
    q = q.order_by(Client.id)
    return list(q)

  def model_ids(self, protocol=None, groups=None):
    """Returns a list of model ids for the specific query by the user.

    Models correspond to Clients for this database (At most one model per identity).

    Keyword Parameters:

    protocol
      Ignored.

    groups
      The groups to which the subjects attached to the models belong.
      Note that 'dev', 'eval' and 'world' are alias for 'Genuine'.

    Returns: A list containing all the model ids (model <-> client in this
    database) belonging to the given group.
    """

    return [client.subid for client in self.models(protocol, groups)]

  def has_client_id(self, id):
    """Returns True if we have a client with a certain integer identifier"""
    return self.query(Client).filter(Client.id==id).count() != 0

  def client(self, id):
    """Returns the client object in the database given a certain id. Raises
    an error if that does not exist."""
    return self.query(Client).filter(Client.id==id).one()

  def objects(self, protocol=None, purposes=None, model_ids=None, groups=None,
      classes=None):
    """Returns a list of :py:class:`.File` for the specific query by the user.

    Keyword Parameters:

    protocol
      One of the Biosecurid protocols ('A').

    purposes
      The purposes required to be retrieved ('enrol', 'probe') or a tuple
      with several of them. If 'None' is given (this is the default), it is
      considered the same as a tuple with all possible values. This field is
      ignored for the data from the "world" group.

    model_ids
      Only retrieves the files for the provided list of model ids (claimed
      client id). The model ids are string. If 'None' is given (this is
      the default), no filter over the model_ids is performed.

    groups
      One of the groups ('dev', 'eval', 'world') or a tuple with several of them.
      If 'None' is given (this is the default), it is considered the same as a
      tuple with all possible values.

    classes
      The classes (types of accesses) to be retrieved ('client', 'impostor')
      or a tuple with several of them. If 'None' is given (this is the
      default), it is considered the same as a tuple with all possible values.

    Returns: A list of :py:class:`.File` objects.
    """

    #groups = self.__group_replace_alias_clients__(groups)
    protocol = self.check_parameters_for_validity(protocol, "protocol", self.protocol_names())
    purposes = self.check_parameters_for_validity(purposes, "purpose", self.purposes())
    groups = self.check_parameters_for_validity(groups, "group", self.groups())
    classes = self.check_parameters_for_validity(classes, "class", ('client', 'impostor'))

    import collections
    # NOTE(review): collections.Iterable was removed in Python 3.10;
    # collections.abc.Iterable is the portable spelling.
    if(model_ids is None):
      model_ids = ()
    elif(not isinstance(model_ids,collections.Iterable)):
      model_ids = (model_ids,)

    # Now query the database
    retval = []
    if ('eval' in groups):
      if('enrol' in purposes):
        # enrolment templates come from genuine users only
        q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
            filter(Client.stype.in_(['Genuine'])).\
            filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'enrol'))
        if model_ids:
          q = q.filter(Client.subid.in_(model_ids))
        q = q.order_by(File.client_id, File.session_id, File.shot_id)
        retval += list(q)

      if('probe' in purposes):
        if('client' in classes):
          # genuine probe accesses
          q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
              filter(Client.stype.in_(['Genuine'])).\
              filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
          if model_ids:
            q = q.filter(Client.subid.in_(model_ids))
          q = q.order_by(File.client_id, File.session_id, File.shot_id)
          retval += list(q)

        if('impostor' in classes):
          # impostor probe accesses
          q = self.query(File).join(Client).join((ProtocolPurpose, File.protocolPurposes)).join(Protocol).\
              filter(Client.stype.in_(['Impostor'])).\
              filter(and_(Protocol.name.in_(protocol), ProtocolPurpose.sgroup.in_(groups), ProtocolPurpose.purpose == 'probe'))
          if model_ids:
            q = q.filter(Client.subid.in_(model_ids))
          q = q.order_by(File.client_id, File.session_id, File.shot_id)
          retval += list(q)

    return list(set(retval)) # To remove duplicates

  def protocol_names(self):
    """Returns all registered protocol names"""
    l = self.protocols()
    retval = [str(k.name) for k in l]
    return retval

  def protocols(self):
    """Returns all registered protocols"""
    return list(self.query(Protocol))

  def has_protocol(self, name):
    """Tells if a certain protocol is available"""
    return self.query(Protocol).filter(Protocol.name==name).count() != 0

  def protocol_purposes(self):
    """Returns all registered protocol purposes"""
    return list(self.query(ProtocolPurpose))

  def purposes(self):
    """Returns the list of allowed purposes"""
    return ProtocolPurpose.purpose_choices
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
add_clients
|
python
|
def add_clients(session, verbose):
  """Registers one Client entry per (type, user id) combination."""
  for client_type in ('Genuine', 'Impostor'):
    for numeric_id in userid_clients:
      client_id = client_type + '_%d' % numeric_id
      if verbose > 1:
        print("  Adding user '%s' of type '%s'..." % (client_id, client_type))
      session.add(Client(client_id, client_type, numeric_id))
|
Add clients to the ATVS Keystroke database.
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L30-L36
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This script creates the BiosecurId database in a single pass.
"""
import os,string
from .models import *
# clients
userid_clients = range(1, 64)
def nodot(item):
  """Can be used to ignore hidden files, starting with the . character.

  Returns True when *item* is not hidden.  Unlike the original
  ``item[0] != '.'`` form, this does not raise IndexError on an empty name.
  """
  return not item.startswith('.')
def add_files(session, imagedir, verbose):
  """Add files to the ATVS Keystroke database.

  Scans *imagedir* for files carrying the database file extension, derives
  the client id, session id and shot id from each base name (formatted as
  '<type>_<user>_<shot>') and registers one :py:class:`File` per sample on
  the given SQL *session*.
  """

  def add_file(session, basename, userid, shotid, sessionid):
    """Parse a single filename and add it to the list."""
    session.add(File(userid, basename, sessionid, shotid))

  filenames = os.listdir(imagedir)
  for filename in filenames:
    basename, extension = os.path.splitext(filename)
    if extension == db_file_extension:
      if verbose>1: print("  Adding file '%s'..." % (basename))
      # BUGFIX: 'string.split(basename, "_")' relied on the Python-2-only
      # string-module function (removed in Python 3); use the str method.
      parts = basename.split("_")
      ctype = parts[0]
      shotid = int(parts[2])
      userid = ctype + '_%d' % int(parts[1])
      if ctype == "Impostor":
        sessionid = 3
      elif ctype == "Genuine" and shotid <= 6:
        sessionid = 1
      elif ctype == "Genuine" and shotid > 6:
        # genuine shots 7..12 belong to session 2, renumbered back to 1..6
        sessionid = 2
        shotid = shotid - 6
      else:
        # previously this fell through and crashed with a NameError on 'sessionid'
        raise ValueError("unrecognized sample file name: %r" % filename)
      add_file(session, basename, userid, shotid, sessionid)
def add_protocols(session, verbose):
  """Adds protocols

  Registers the single 'A' protocol with its ('eval', 'enrol') and
  ('eval', 'probe') purposes and attaches the matching files to each purpose.
  """

  # 1. DEFINITIONS
  enroll_session = [1]
  client_probe_session = [2]
  impostor_probe_session = [3]
  protocols = ['A']

  # 2. ADDITIONS TO THE SQL DATABASE
  protocolPurpose_list = [('eval', 'enrol'), ('eval', 'probe')]
  for proto in protocols:
    p = Protocol(proto)
    # Add protocol
    if verbose: print("Adding protocol %s..." % (proto))
    session.add(p)
    session.flush()
    session.refresh(p)

    # Add protocol purposes
    for key in range(len(protocolPurpose_list)):
      purpose = protocolPurpose_list[key]
      # BUGFIX: this was a Python-2-only 'print' statement (a SyntaxError
      # under Python 3); same debug output via the print() function.
      print(p.id, purpose[0], purpose[1])
      pu = ProtocolPurpose(p.id, purpose[0], purpose[1])
      if verbose>1: print("  Adding protocol purpose ('%s','%s')..." % (purpose[0], purpose[1]))
      session.add(pu)
      session.flush()
      session.refresh(pu)

      # Add files attached with this protocol purpose
      if(key == 0): #test enrol
        q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(enroll_session))
        for k in q:
          if verbose>1: print("    Adding protocol file '%s'..." % (k.path))
          pu.files.append(k)
      elif(key == 1): #test probe
        q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(client_probe_session))
        for k in q:
          if verbose>1: print("    Adding protocol file '%s'..." % (k.path))
          pu.files.append(k)
        q = session.query(File).join(Client).filter(Client.stype == 'Impostor').filter(File.session_id.in_(impostor_probe_session))
        for k in q:
          if verbose>1: print("    Adding protocol file '%s'..." % (k.path))
          pu.files.append(k)
def create_tables(args):
  """Creates all necessary tables (only to be used at the first time)"""

  from bob.db.utils import create_engine_try_nolock

  echo_sql = (args.verbose > 2)
  engine = create_engine_try_nolock(args.type, args.files[0], echo=echo_sql)
  Base.metadata.create_all(engine)
# Driver API
# ==========
def create(args):
  """Creates or re-creates this database"""

  from bob.db.utils import session_try_nolock

  dbfile = args.files[0]

  # optionally wipe an existing database file first
  if args.recreate and os.path.exists(dbfile):
    if args.verbose:
      print('unlinking %s...' % dbfile)
    os.unlink(dbfile)

  parent_dir = os.path.dirname(dbfile)
  if not os.path.exists(parent_dir):
    os.makedirs(parent_dir)

  # the real work...
  create_tables(args)
  sess = session_try_nolock(args.type, dbfile, echo=(args.verbose > 2))
  add_clients(sess, args.verbose)
  add_files(sess, args.imagedir, args.verbose)
  add_protocols(sess, args.verbose)
  sess.commit()
  sess.close()
def add_command(subparsers):
  """Add specific subcommands that the action "create" can use"""

  parser = subparsers.add_parser('create', help=create.__doc__)

  parser.add_argument('-R', '--recreate', action='store_true',
      help="If set, I'll first erase the current database")
  parser.add_argument('-v', '--verbose', action='count',
      help="Do SQL operations in a verbose way?")
  parser.add_argument('-D', '--imagedir', metavar='DIR',
      default='/home/bob/BTAS_Keystroke_files_SingleFile',
      help="Change the relative path to the directory containing the images of the ATVS Keystroke database.")

  parser.set_defaults(func=create) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
add_files
|
python
|
def add_files(session, imagedir, verbose):
  """Add files to the ATVS Keystroke database.

  Scans *imagedir* for files carrying the database file extension, derives
  the client id, session id and shot id from each base name (formatted as
  '<type>_<user>_<shot>') and registers one :py:class:`File` per sample on
  the given SQL *session*.
  """

  def add_file(session, basename, userid, shotid, sessionid):
    """Parse a single filename and add it to the list."""
    session.add(File(userid, basename, sessionid, shotid))

  filenames = os.listdir(imagedir)
  for filename in filenames:
    basename, extension = os.path.splitext(filename)
    if extension == db_file_extension:
      if verbose>1: print("  Adding file '%s'..." % (basename))
      # BUGFIX: 'string.split(basename, "_")' relied on the Python-2-only
      # string-module function (removed in Python 3); use the str method.
      parts = basename.split("_")
      ctype = parts[0]
      shotid = int(parts[2])
      userid = ctype + '_%d' % int(parts[1])
      if ctype == "Impostor":
        sessionid = 3
      elif ctype == "Genuine" and shotid <= 6:
        sessionid = 1
      elif ctype == "Genuine" and shotid > 6:
        # genuine shots 7..12 belong to session 2, renumbered back to 1..6
        sessionid = 2
        shotid = shotid - 6
      else:
        # previously this fell through and crashed with a NameError on 'sessionid'
        raise ValueError("unrecognized sample file name: %r" % filename)
      add_file(session, basename, userid, shotid, sessionid)
|
Add files to the ATVS Keystroke database.
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L39-L62
|
[
"def add_file(session, basename, userid, shotid, sessionid):\n \"\"\"Parse a single filename and add it to the list.\"\"\"\n session.add(File(userid, basename, sessionid, shotid))\n"
] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This script creates the BiosecurId database in a single pass.
"""
import os,string
from .models import *
# clients
userid_clients = range(1, 64)
def nodot(item):
  """Can be used to ignore hidden files, starting with the . character.

  Returns True when *item* is not hidden.  Unlike the original
  ``item[0] != '.'`` form, this does not raise IndexError on an empty name.
  """
  return not item.startswith('.')
def add_clients(session, verbose):
  """Add clients to the ATVS Keystroke database.

  Registers one Client entry per (type, user id) combination.
  """
  for client_type in ('Genuine', 'Impostor'):
    for numeric_id in userid_clients:
      client_id = client_type + '_%d' % numeric_id
      if verbose > 1:
        print("  Adding user '%s' of type '%s'..." % (client_id, client_type))
      session.add(Client(client_id, client_type, numeric_id))
def add_protocols(session, verbose):
"""Adds protocols"""
# 1. DEFINITIONS
enroll_session = [1]
client_probe_session = [2]
impostor_probe_session = [3]
protocols = ['A']
# 2. ADDITIONS TO THE SQL DATABASE
protocolPurpose_list = [('eval', 'enrol'), ('eval', 'probe')]
for proto in protocols:
p = Protocol(proto)
# Add protocol
if verbose: print("Adding protocol %s..." % (proto))
session.add(p)
session.flush()
session.refresh(p)
# Add protocol purposes
for key in range(len(protocolPurpose_list)):
purpose = protocolPurpose_list[key]
print p.id, purpose[0], purpose[1]
pu = ProtocolPurpose(p.id, purpose[0], purpose[1])
if verbose>1: print(" Adding protocol purpose ('%s','%s')..." % (purpose[0], purpose[1]))
session.add(pu)
session.flush()
session.refresh(pu)
# Add files attached with this protocol purpose
if(key == 0): #test enrol
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(enroll_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
elif(key == 1): #test probe
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(client_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
q = session.query(File).join(Client).filter(Client.stype == 'Impostor').filter(File.session_id.in_(impostor_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
def create_tables(args):
"""Creates all necessary tables (only to be used at the first time)"""
from bob.db.utils import create_engine_try_nolock
engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose > 2))
Base.metadata.create_all(engine)
# Driver API
# ==========
def create(args):
"""Creates or re-creates this database"""
from bob.db.utils import session_try_nolock
dbfile = args.files[0]
if args.recreate:
if args.verbose and os.path.exists(dbfile):
print('unlinking %s...' % dbfile)
if os.path.exists(dbfile): os.unlink(dbfile)
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# the real work...
create_tables(args)
s = session_try_nolock(args.type, dbfile, echo=(args.verbose > 2))
add_clients(s, args.verbose)
add_files(s, args.imagedir, args.verbose)
add_protocols(s, args.verbose)
s.commit()
s.close()
def add_command(subparsers):
"""Add specific subcommands that the action "create" can use"""
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-R', '--recreate', action='store_true', help="If set, I'll first erase the current database")
parser.add_argument('-v', '--verbose', action='count', help="Do SQL operations in a verbose way?")
parser.add_argument('-D', '--imagedir', metavar='DIR', default='/home/bob/BTAS_Keystroke_files_SingleFile', help="Change the relative path to the directory containing the images of the ATVS Keystroke database.")
parser.set_defaults(func=create) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
add_protocols
|
python
|
def add_protocols(session, verbose):
# 1. DEFINITIONS
enroll_session = [1]
client_probe_session = [2]
impostor_probe_session = [3]
protocols = ['A']
# 2. ADDITIONS TO THE SQL DATABASE
protocolPurpose_list = [('eval', 'enrol'), ('eval', 'probe')]
for proto in protocols:
p = Protocol(proto)
# Add protocol
if verbose: print("Adding protocol %s..." % (proto))
session.add(p)
session.flush()
session.refresh(p)
# Add protocol purposes
for key in range(len(protocolPurpose_list)):
purpose = protocolPurpose_list[key]
print p.id, purpose[0], purpose[1]
pu = ProtocolPurpose(p.id, purpose[0], purpose[1])
if verbose>1: print(" Adding protocol purpose ('%s','%s')..." % (purpose[0], purpose[1]))
session.add(pu)
session.flush()
session.refresh(pu)
# Add files attached with this protocol purpose
if(key == 0): #test enrol
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(enroll_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
elif(key == 1): #test probe
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(client_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
q = session.query(File).join(Client).filter(Client.stype == 'Impostor').filter(File.session_id.in_(impostor_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
|
Adds protocols
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L65-L109
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This script creates the BiosecurId database in a single pass.
"""
import os,string
from .models import *
# clients
userid_clients = range(1, 64)
def nodot(item):
"""Can be used to ignore hidden files, starting with the . character."""
return item[0] != '.'
def add_clients(session, verbose):
"""Add clients to the ATVS Keystroke database."""
for ctype in ['Genuine', 'Impostor']:
for cdid in userid_clients:
cid = ctype + '_%d' % cdid
if verbose>1: print(" Adding user '%s' of type '%s'..." % (cid, ctype))
session.add(Client(cid, ctype, cdid))
def add_files(session, imagedir, verbose):
"""Add files to the ATVS Keystroke database."""
def add_file(session, basename, userid, shotid, sessionid):
"""Parse a single filename and add it to the list."""
session.add(File(userid, basename, sessionid, shotid))
filenames = os.listdir(imagedir)
for filename in filenames:
basename, extension = os.path.splitext(filename)
if extension == db_file_extension:
if verbose>1: print(" Adding file '%s'..." % (basename))
parts = string.split(basename, "_")
ctype = parts[0]
shotid = int(parts[2])
userid = ctype + '_%d' % int(parts[1])
if parts[0] == "Impostor":
sessionid = 3
elif parts[0] == "Genuine" and shotid <= 6:
sessionid = 1
elif parts[0] == "Genuine" and shotid > 6:
sessionid = 2
shotid = shotid - 6
add_file(session, basename, userid, shotid, sessionid)
def create_tables(args):
"""Creates all necessary tables (only to be used at the first time)"""
from bob.db.utils import create_engine_try_nolock
engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose > 2))
Base.metadata.create_all(engine)
# Driver API
# ==========
def create(args):
"""Creates or re-creates this database"""
from bob.db.utils import session_try_nolock
dbfile = args.files[0]
if args.recreate:
if args.verbose and os.path.exists(dbfile):
print('unlinking %s...' % dbfile)
if os.path.exists(dbfile): os.unlink(dbfile)
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# the real work...
create_tables(args)
s = session_try_nolock(args.type, dbfile, echo=(args.verbose > 2))
add_clients(s, args.verbose)
add_files(s, args.imagedir, args.verbose)
add_protocols(s, args.verbose)
s.commit()
s.close()
def add_command(subparsers):
"""Add specific subcommands that the action "create" can use"""
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-R', '--recreate', action='store_true', help="If set, I'll first erase the current database")
parser.add_argument('-v', '--verbose', action='count', help="Do SQL operations in a verbose way?")
parser.add_argument('-D', '--imagedir', metavar='DIR', default='/home/bob/BTAS_Keystroke_files_SingleFile', help="Change the relative path to the directory containing the images of the ATVS Keystroke database.")
parser.set_defaults(func=create) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
create_tables
|
python
|
def create_tables(args):
from bob.db.utils import create_engine_try_nolock
engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose > 2))
Base.metadata.create_all(engine)
|
Creates all necessary tables (only to be used at the first time)
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L112-L117
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This script creates the BiosecurId database in a single pass.
"""
import os,string
from .models import *
# clients
userid_clients = range(1, 64)
def nodot(item):
"""Can be used to ignore hidden files, starting with the . character."""
return item[0] != '.'
def add_clients(session, verbose):
"""Add clients to the ATVS Keystroke database."""
for ctype in ['Genuine', 'Impostor']:
for cdid in userid_clients:
cid = ctype + '_%d' % cdid
if verbose>1: print(" Adding user '%s' of type '%s'..." % (cid, ctype))
session.add(Client(cid, ctype, cdid))
def add_files(session, imagedir, verbose):
"""Add files to the ATVS Keystroke database."""
def add_file(session, basename, userid, shotid, sessionid):
"""Parse a single filename and add it to the list."""
session.add(File(userid, basename, sessionid, shotid))
filenames = os.listdir(imagedir)
for filename in filenames:
basename, extension = os.path.splitext(filename)
if extension == db_file_extension:
if verbose>1: print(" Adding file '%s'..." % (basename))
parts = string.split(basename, "_")
ctype = parts[0]
shotid = int(parts[2])
userid = ctype + '_%d' % int(parts[1])
if parts[0] == "Impostor":
sessionid = 3
elif parts[0] == "Genuine" and shotid <= 6:
sessionid = 1
elif parts[0] == "Genuine" and shotid > 6:
sessionid = 2
shotid = shotid - 6
add_file(session, basename, userid, shotid, sessionid)
def add_protocols(session, verbose):
"""Adds protocols"""
# 1. DEFINITIONS
enroll_session = [1]
client_probe_session = [2]
impostor_probe_session = [3]
protocols = ['A']
# 2. ADDITIONS TO THE SQL DATABASE
protocolPurpose_list = [('eval', 'enrol'), ('eval', 'probe')]
for proto in protocols:
p = Protocol(proto)
# Add protocol
if verbose: print("Adding protocol %s..." % (proto))
session.add(p)
session.flush()
session.refresh(p)
# Add protocol purposes
for key in range(len(protocolPurpose_list)):
purpose = protocolPurpose_list[key]
print p.id, purpose[0], purpose[1]
pu = ProtocolPurpose(p.id, purpose[0], purpose[1])
if verbose>1: print(" Adding protocol purpose ('%s','%s')..." % (purpose[0], purpose[1]))
session.add(pu)
session.flush()
session.refresh(pu)
# Add files attached with this protocol purpose
if(key == 0): #test enrol
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(enroll_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
elif(key == 1): #test probe
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(client_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
q = session.query(File).join(Client).filter(Client.stype == 'Impostor').filter(File.session_id.in_(impostor_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
# Driver API
# ==========
def create(args):
"""Creates or re-creates this database"""
from bob.db.utils import session_try_nolock
dbfile = args.files[0]
if args.recreate:
if args.verbose and os.path.exists(dbfile):
print('unlinking %s...' % dbfile)
if os.path.exists(dbfile): os.unlink(dbfile)
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# the real work...
create_tables(args)
s = session_try_nolock(args.type, dbfile, echo=(args.verbose > 2))
add_clients(s, args.verbose)
add_files(s, args.imagedir, args.verbose)
add_protocols(s, args.verbose)
s.commit()
s.close()
def add_command(subparsers):
"""Add specific subcommands that the action "create" can use"""
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-R', '--recreate', action='store_true', help="If set, I'll first erase the current database")
parser.add_argument('-v', '--verbose', action='count', help="Do SQL operations in a verbose way?")
parser.add_argument('-D', '--imagedir', metavar='DIR', default='/home/bob/BTAS_Keystroke_files_SingleFile', help="Change the relative path to the directory containing the images of the ATVS Keystroke database.")
parser.set_defaults(func=create) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
create
|
python
|
def create(args):
from bob.db.utils import session_try_nolock
dbfile = args.files[0]
if args.recreate:
if args.verbose and os.path.exists(dbfile):
print('unlinking %s...' % dbfile)
if os.path.exists(dbfile): os.unlink(dbfile)
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# the real work...
create_tables(args)
s = session_try_nolock(args.type, dbfile, echo=(args.verbose > 2))
add_clients(s, args.verbose)
add_files(s, args.imagedir, args.verbose)
add_protocols(s, args.verbose)
s.commit()
s.close()
|
Creates or re-creates this database
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L122-L144
|
[
"def add_clients(session, verbose):\n \"\"\"Add clients to the ATVS Keystroke database.\"\"\"\n for ctype in ['Genuine', 'Impostor']:\n for cdid in userid_clients:\n cid = ctype + '_%d' % cdid\n if verbose>1: print(\" Adding user '%s' of type '%s'...\" % (cid, ctype))\n session.add(Client(cid, ctype, cdid))\n",
"def add_files(session, imagedir, verbose):\n \"\"\"Add files to the ATVS Keystroke database.\"\"\"\n\n def add_file(session, basename, userid, shotid, sessionid):\n \"\"\"Parse a single filename and add it to the list.\"\"\"\n session.add(File(userid, basename, sessionid, shotid))\n\n filenames = os.listdir(imagedir)\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension == db_file_extension:\n if verbose>1: print(\" Adding file '%s'...\" % (basename))\n parts = string.split(basename, \"_\")\n ctype = parts[0]\n shotid = int(parts[2])\n userid = ctype + '_%d' % int(parts[1])\n if parts[0] == \"Impostor\":\n sessionid = 3\n elif parts[0] == \"Genuine\" and shotid <= 6:\n sessionid = 1\n elif parts[0] == \"Genuine\" and shotid > 6:\n sessionid = 2\n shotid = shotid - 6\n add_file(session, basename, userid, shotid, sessionid)\n",
"def add_protocols(session, verbose):\n \"\"\"Adds protocols\"\"\"\n\n # 1. DEFINITIONS\n enroll_session = [1]\n client_probe_session = [2]\n impostor_probe_session = [3]\n protocols = ['A']\n\n # 2. ADDITIONS TO THE SQL DATABASE\n protocolPurpose_list = [('eval', 'enrol'), ('eval', 'probe')]\n for proto in protocols:\n p = Protocol(proto)\n # Add protocol\n if verbose: print(\"Adding protocol %s...\" % (proto))\n session.add(p)\n session.flush()\n session.refresh(p)\n\n # Add protocol purposes\n for key in range(len(protocolPurpose_list)):\n purpose = protocolPurpose_list[key]\n print p.id, purpose[0], purpose[1]\n pu = ProtocolPurpose(p.id, purpose[0], purpose[1])\n if verbose>1: print(\" Adding protocol purpose ('%s','%s')...\" % (purpose[0], purpose[1]))\n session.add(pu)\n session.flush()\n session.refresh(pu)\n\n # Add files attached with this protocol purpose\n if(key == 0): #test enrol\n q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(enroll_session))\n for k in q:\n if verbose>1: print(\" Adding protocol file '%s'...\" % (k.path))\n pu.files.append(k)\n\n elif(key == 1): #test probe\n q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(client_probe_session))\n for k in q:\n if verbose>1: print(\" Adding protocol file '%s'...\" % (k.path))\n pu.files.append(k)\n q = session.query(File).join(Client).filter(Client.stype == 'Impostor').filter(File.session_id.in_(impostor_probe_session))\n for k in q:\n if verbose>1: print(\" Adding protocol file '%s'...\" % (k.path))\n pu.files.append(k)\n",
"def create_tables(args):\n \"\"\"Creates all necessary tables (only to be used at the first time)\"\"\"\n\n from bob.db.utils import create_engine_try_nolock\n engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose > 2))\n Base.metadata.create_all(engine)\n"
] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This script creates the BiosecurId database in a single pass.
"""
import os,string
from .models import *
# clients
userid_clients = range(1, 64)
def nodot(item):
"""Can be used to ignore hidden files, starting with the . character."""
return item[0] != '.'
def add_clients(session, verbose):
"""Add clients to the ATVS Keystroke database."""
for ctype in ['Genuine', 'Impostor']:
for cdid in userid_clients:
cid = ctype + '_%d' % cdid
if verbose>1: print(" Adding user '%s' of type '%s'..." % (cid, ctype))
session.add(Client(cid, ctype, cdid))
def add_files(session, imagedir, verbose):
"""Add files to the ATVS Keystroke database."""
def add_file(session, basename, userid, shotid, sessionid):
"""Parse a single filename and add it to the list."""
session.add(File(userid, basename, sessionid, shotid))
filenames = os.listdir(imagedir)
for filename in filenames:
basename, extension = os.path.splitext(filename)
if extension == db_file_extension:
if verbose>1: print(" Adding file '%s'..." % (basename))
parts = string.split(basename, "_")
ctype = parts[0]
shotid = int(parts[2])
userid = ctype + '_%d' % int(parts[1])
if parts[0] == "Impostor":
sessionid = 3
elif parts[0] == "Genuine" and shotid <= 6:
sessionid = 1
elif parts[0] == "Genuine" and shotid > 6:
sessionid = 2
shotid = shotid - 6
add_file(session, basename, userid, shotid, sessionid)
def add_protocols(session, verbose):
"""Adds protocols"""
# 1. DEFINITIONS
enroll_session = [1]
client_probe_session = [2]
impostor_probe_session = [3]
protocols = ['A']
# 2. ADDITIONS TO THE SQL DATABASE
protocolPurpose_list = [('eval', 'enrol'), ('eval', 'probe')]
for proto in protocols:
p = Protocol(proto)
# Add protocol
if verbose: print("Adding protocol %s..." % (proto))
session.add(p)
session.flush()
session.refresh(p)
# Add protocol purposes
for key in range(len(protocolPurpose_list)):
purpose = protocolPurpose_list[key]
print p.id, purpose[0], purpose[1]
pu = ProtocolPurpose(p.id, purpose[0], purpose[1])
if verbose>1: print(" Adding protocol purpose ('%s','%s')..." % (purpose[0], purpose[1]))
session.add(pu)
session.flush()
session.refresh(pu)
# Add files attached with this protocol purpose
if(key == 0): #test enrol
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(enroll_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
elif(key == 1): #test probe
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(client_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
q = session.query(File).join(Client).filter(Client.stype == 'Impostor').filter(File.session_id.in_(impostor_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
def create_tables(args):
"""Creates all necessary tables (only to be used at the first time)"""
from bob.db.utils import create_engine_try_nolock
engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose > 2))
Base.metadata.create_all(engine)
# Driver API
# ==========
def add_command(subparsers):
"""Add specific subcommands that the action "create" can use"""
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-R', '--recreate', action='store_true', help="If set, I'll first erase the current database")
parser.add_argument('-v', '--verbose', action='count', help="Do SQL operations in a verbose way?")
parser.add_argument('-D', '--imagedir', metavar='DIR', default='/home/bob/BTAS_Keystroke_files_SingleFile', help="Change the relative path to the directory containing the images of the ATVS Keystroke database.")
parser.set_defaults(func=create) #action
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
add_command
|
python
|
def add_command(subparsers):
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-R', '--recreate', action='store_true', help="If set, I'll first erase the current database")
parser.add_argument('-v', '--verbose', action='count', help="Do SQL operations in a verbose way?")
parser.add_argument('-D', '--imagedir', metavar='DIR', default='/home/bob/BTAS_Keystroke_files_SingleFile', help="Change the relative path to the directory containing the images of the ATVS Keystroke database.")
parser.set_defaults(func=create)
|
Add specific subcommands that the action "create" can use
|
train
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L146-L155
| null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This script creates the BiosecurId database in a single pass.
"""
import os,string
from .models import *
# clients
userid_clients = range(1, 64)
def nodot(item):
"""Can be used to ignore hidden files, starting with the . character."""
return item[0] != '.'
def add_clients(session, verbose):
"""Add clients to the ATVS Keystroke database."""
for ctype in ['Genuine', 'Impostor']:
for cdid in userid_clients:
cid = ctype + '_%d' % cdid
if verbose>1: print(" Adding user '%s' of type '%s'..." % (cid, ctype))
session.add(Client(cid, ctype, cdid))
def add_files(session, imagedir, verbose):
"""Add files to the ATVS Keystroke database."""
def add_file(session, basename, userid, shotid, sessionid):
"""Parse a single filename and add it to the list."""
session.add(File(userid, basename, sessionid, shotid))
filenames = os.listdir(imagedir)
for filename in filenames:
basename, extension = os.path.splitext(filename)
if extension == db_file_extension:
if verbose>1: print(" Adding file '%s'..." % (basename))
parts = string.split(basename, "_")
ctype = parts[0]
shotid = int(parts[2])
userid = ctype + '_%d' % int(parts[1])
if parts[0] == "Impostor":
sessionid = 3
elif parts[0] == "Genuine" and shotid <= 6:
sessionid = 1
elif parts[0] == "Genuine" and shotid > 6:
sessionid = 2
shotid = shotid - 6
add_file(session, basename, userid, shotid, sessionid)
def add_protocols(session, verbose):
"""Adds protocols"""
# 1. DEFINITIONS
enroll_session = [1]
client_probe_session = [2]
impostor_probe_session = [3]
protocols = ['A']
# 2. ADDITIONS TO THE SQL DATABASE
protocolPurpose_list = [('eval', 'enrol'), ('eval', 'probe')]
for proto in protocols:
p = Protocol(proto)
# Add protocol
if verbose: print("Adding protocol %s..." % (proto))
session.add(p)
session.flush()
session.refresh(p)
# Add protocol purposes
for key in range(len(protocolPurpose_list)):
purpose = protocolPurpose_list[key]
print p.id, purpose[0], purpose[1]
pu = ProtocolPurpose(p.id, purpose[0], purpose[1])
if verbose>1: print(" Adding protocol purpose ('%s','%s')..." % (purpose[0], purpose[1]))
session.add(pu)
session.flush()
session.refresh(pu)
# Add files attached with this protocol purpose
if(key == 0): #test enrol
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(enroll_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
elif(key == 1): #test probe
q = session.query(File).join(Client).filter(Client.stype == 'Genuine').filter(File.session_id.in_(client_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
q = session.query(File).join(Client).filter(Client.stype == 'Impostor').filter(File.session_id.in_(impostor_probe_session))
for k in q:
if verbose>1: print(" Adding protocol file '%s'..." % (k.path))
pu.files.append(k)
def create_tables(args):
"""Creates all necessary tables (only to be used at the first time)"""
from bob.db.utils import create_engine_try_nolock
engine = create_engine_try_nolock(args.type, args.files[0], echo=(args.verbose > 2))
Base.metadata.create_all(engine)
# Driver API
# ==========
def create(args):
"""Creates or re-creates this database"""
from bob.db.utils import session_try_nolock
dbfile = args.files[0]
if args.recreate:
if args.verbose and os.path.exists(dbfile):
print('unlinking %s...' % dbfile)
if os.path.exists(dbfile): os.unlink(dbfile)
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# the real work...
create_tables(args)
s = session_try_nolock(args.type, dbfile, echo=(args.verbose > 2))
add_clients(s, args.verbose)
add_files(s, args.imagedir, args.verbose)
add_protocols(s, args.verbose)
s.commit()
s.close()
#action
|
RonenNess/Fileter
|
fileter/iterators/concat_files.py
|
ConcatFiles.process_file
|
python
|
def process_file(self, path, dryrun):
# special case - skip output file so we won't include it in result
if path == self._output_path:
return None
# if dryrun skip and return file
if dryrun:
return path
# concat file with output file
with open(path, "rb") as infile:
data = infile.read()
self._output_file.write(data)
# return processed file path
return path
|
Concat files and return filename.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/concat_files.py#L41-L59
| null |
class ConcatFiles(files_iterator.FilesIterator):
"""
This files iterator concat all scanned files.
"""
def __init__(self, outfile):
"""
concat all source files into one output file.
:param outfile: output file path.
"""
super(ConcatFiles, self).__init__()
self._output_path = outfile
self._output_file = None
def on_start(self, dryrun):
"""
Open the output file.
"""
if not dryrun:
self._output_file = open(self._output_path, "wb")
def on_end(self, dryrun):
"""
Close the output file.
"""
if not dryrun:
self._output_file.close()
|
RonenNess/Fileter
|
fileter/iterators/add_header.py
|
AddHeader.process_file
|
python
|
def process_file(self, path, dryrun):
if dryrun:
return path
# get file's current header
with open(path, "r") as infile:
head = infile.read(len(self.__header))
# normalize line breaks
if self.__normalize_br:
head = head.replace("\r\n", "\n")
# already contain header? skip
if head == self.__header:
return path
# add header to file
self.push_header(path)
# return processed file
return path
|
Add header to all files.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/add_header.py#L40-L63
| null |
class AddHeader(files_iterator.FilesIterator):
"""
This iterator will add a constant header to all files, unless header already exist.
For example, this can be used to add
#!/usr/bin/python
# -*- coding: utf-8 -*-
To all python files.
"""
def __init__(self, header, normalize_br=False):
"""
Add header to files.
:param header: header to add to all files.
:param normalize_br: if True, will normalize \r\n into \n.
"""
super(AddHeader, self).__init__()
# normalize line breaks
if normalize_br:
header = header.replace("\r\n", "\n")
# set header and if we want to normalize br
self.__header = header
self.__normalize_br = normalize_br
def push_header(self, filename):
"""
Push the header to a given filename
:param filename: the file path to push into.
"""
# open file and read it all
with open(filename, "r") as infile:
content = infile.read()
# push header
content = self.__header + content
# re-write file with the header
with open(filename, "w") as outfile:
outfile.write(content)
|
RonenNess/Fileter
|
fileter/iterators/add_header.py
|
AddHeader.push_header
|
python
|
def push_header(self, filename):
# open file and read it all
with open(filename, "r") as infile:
content = infile.read()
# push header
content = self.__header + content
# re-write file with the header
with open(filename, "w") as outfile:
outfile.write(content)
|
Push the header to a given filename
:param filename: the file path to push into.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/add_header.py#L65-L79
| null |
class AddHeader(files_iterator.FilesIterator):
"""
This iterator will add a constant header to all files, unless header already exist.
For example, this can be used to add
#!/usr/bin/python
# -*- coding: utf-8 -*-
To all python files.
"""
def __init__(self, header, normalize_br=False):
"""
Add header to files.
:param header: header to add to all files.
:param normalize_br: if True, will normalize \r\n into \n.
"""
super(AddHeader, self).__init__()
# normalize line breaks
if normalize_br:
header = header.replace("\r\n", "\n")
# set header and if we want to normalize br
self.__header = header
self.__normalize_br = normalize_br
def process_file(self, path, dryrun):
"""
Add header to all files.
"""
if dryrun:
return path
# get file's current header
with open(path, "r") as infile:
head = infile.read(len(self.__header))
# normalize line breaks
if self.__normalize_br:
head = head.replace("\r\n", "\n")
# already contain header? skip
if head == self.__header:
return path
# add header to file
self.push_header(path)
# return processed file
return path
|
RonenNess/Fileter
|
fileter/iterators/grep.py
|
Grep.process_file
|
python
|
def process_file(self, path, dryrun):
# if dryrun just return files
if dryrun:
return path
# scan file and match lines
ret = []
with open(path, "r") as infile:
for line in infile:
if re.search(self.__exp, line):
ret.append(line)
# if found matches return list of lines, else return None
return ret if len(ret) > 0 else None
|
Print files path.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/grep.py#L39-L55
| null |
class Grep(files_iterator.FilesIterator):
"""
Iterate over files and return lines that match the grep condition.
Return a list of lists: for every file return the list of occurances found in it.
"""
def __init__(self, expression):
"""
Init the grep iterator.
:param expression: the grep expression to look for.
"""
super(Grep, self).__init__()
self.__exp = expression
def set_grep(self, expression):
"""
Change / set the grep expression.
"""
self.__exp = expression
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_folder
|
python
|
def add_folder(self, path, depth=None, source_type=DefaultSourceType):
self.add_source(FolderSource(path, depth, **source_type))
return self
|
Add a folder source to scan recursively from path (string).
:param path: folder path.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L87-L96
| null |
class FilesIterator(object):
"""
Base class to iterate over file sources and perform pre-defined actions on them.
This class can be used in two ways:
1. as an iterator, if you want to iterate files and use them externally.
2. as an object that have pre-defined processing function and can iterate and process files internally.
For example, we can implement an iterator that iterate over files and add a comment to every first line.
Weather you use this as an iterator or as an object, all file paths will be processed via the process_file() function.
"""
# type of sources you can add
class SourceTypes:
# return just files.
FilesOnly = {"ret_files": True, "ret_folders": False}
# return just folders.
FoldersOnly = {"ret_files": False, "ret_folders": True}
# return both.
FilesAndFolders = {"ret_files": True, "ret_folders": True}
# define the default source type
DefaultSourceType = SourceTypes.FilesOnly
# type of filters we can add to the iterator, and how to use them
class FilterType:
# All required filters must match in order for a file to be processed.
# for example, if you have 2 required filters and file only match 1, it will be ignored.
Required = 0
# If file matches at least one Include filter, it will be processed immediately, even if doesn't
# match all required filters. Note: this filter type collide with Exclude; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Include = 1
# If file matches at least one Exclude filter, it will be ignored immediately, even if it
# match all required filters. Note: this filter type collide with Include; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Exclude = 2
# define the default filter type
DefaultFilterType = FilterType.Required
def __init__(self):
"""
Init the iterator.
"""
self.__sources = []
self.__filters = []
def add_source(self, source):
"""
Add a source to this iterator.
:param source: files source, must be an object inheriting from sources.SourceAPI.
"""
self.__sources.append(source)
return self
def add_file(self, filepath):
"""
Add a single file source from path (string).
:param filepath: file path as string. can also be a list of files.
"""
self.add_source(FileSource(filepath))
return self
def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
"""
Add a recursive folder scan using a linux-style patterns.
:param pattern: pattern or list of patterns to match.
:param root: root to start from (default to '.')
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(PatternSource(pattern, root, depth, **source_type))
return self
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively, with a regex filter on directories.
:param regex: regex string to filter folders by.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
return self
def add_filter(self, files_filter, filter_type=DefaultFilterType):
"""
Add a files filter to this iterator.
For a file to be processed, it must match ALL filters, eg they are added with ADD, not OR.
:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
"""
self.__filters.append((files_filter, filter_type))
return self
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
"""
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
"""
self.add_filter(FilterPattern(pattern), filter_type)
return self
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
"""
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
"""
self.add_filter(FilterRegex(regex_expression), filter_type)
return self
def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
"""
Add a files filter by extensions to this iterator.
:param extensions: single extension or list of extensions to filter by.
for example: ["py", "js", "cpp", ...]
"""
self.add_filter(FilterExtension(extensions), filter_type)
return self
def __iter__(self):
"""
Return self as iterator.
"""
return self.next()
def get_all(self):
"""
return all files in this iterator as list.
"""
return [x for x in iter(self)]
def process_all(self):
"""
Iterate internally over all files and call process_file().
Use this function if you want to use this iterator with pre-defined processing function, and not
for external iteration.
"""
for _ in self.next():
pass
def dry_run(self):
"""
Iterate over all files and just print them.
This will not call "process_file()", this will only fetch files from all sources
and apply filters on them.
"""
for f in self.next(dryrun=True):
print f
def next(self, dryrun=False):
"""
Iterate over files in all sources.
Use this if you want to iterate files externally.
:param dryrun: if true, will only return all filenames instead of processing them, eg will not
call "process_file" at all, and just show all the files it will scan.
"""
# call the start hook
self.on_start(dryrun)
# store current dir
curr_dir = ""
# iterate over sources
for src in self.__sources:
# call the start_source hook
self.on_start_source(src, dryrun)
# iterate over files
for filename in src.next():
# make sure file pass filters
if not self.match_filters(filename):
continue
# get curr dir to call the directory-enter hook
new_curr_dir = os.path.dirname(filename)
if new_curr_dir != curr_dir:
self.on_enter_dir(new_curr_dir, dryrun)
curr_dir = new_curr_dir
# process file
curr = self.process_file(filename, dryrun)
# if after process we still want to return file for external iteration, return it
if curr is not None:
yield curr
# call the end-source hook
self.on_end_source(src, dryrun)
# call the end iteration hook and raise stop iteration exception
self.on_end(dryrun)
raise StopIteration
def on_enter_dir(self, directory, dryrun):
"""
A hook you can implement to be called when iteration changes directory (called when entered / exit
directories while scanning)
:param directory: the directory we are now in.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_start_source(self, source, dryrun):
"""
A hook you can implement to be called when a new source is starting to be processed.
:param source: the source we started processing.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_end_source(self, source, dryrun):
"""
A hook you can implement to be called when we finish iterating a source.
:param source: the source we finished processing.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_start(self, dryrun):
"""
A hook you can implement to be called when an iteration starts.
For example, you can use this to open output file, log, etc.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_end(self, dryrun):
"""
A hook you can implement to be called when an iteration ends.
For example, you can use this to close output file, log, etc.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def match_filters(self, path):
"""
Get filename and return True if file pass all filters and should be processed.
:param path: path to check.
:return: True if pass filters, false otherwise.
"""
# indicate if all required filters were matched
all_required_match = True
# iterate over filters to match files
for filt, ftype in self.__filters:
# handle "Required" filters:
if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
all_required_match = False
# handle "Include" filters:
elif ftype == self.FilterType.Include and filt.match(path):
return True
# handle "Exclude" filters:
elif ftype == self.FilterType.Exclude and filt.match(path):
return False
# if got here it means we processed all filters, and no include/exclude filter was matched.
# return if all required were matched
return all_required_match
def process_file(self, path, dryrun):
"""
This function is called for every file processed.
When using this class as an iterator, this function can return None to skip files, or
process their names before returned.
:param path: current file path.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
:return: should return filename, or None if you want to omit this file from the iteration loop.
"""
return path
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_pattern
|
python
|
def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
self.add_source(PatternSource(pattern, root, depth, **source_type))
return self
|
Add a recursive folder scan using a linux-style patterns.
:param pattern: pattern or list of patterns to match.
:param root: root to start from (default to '.')
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L98-L108
| null |
class FilesIterator(object):
"""
Base class to iterate over file sources and perform pre-defined actions on them.
This class can be used in two ways:
1. as an iterator, if you want to iterate files and use them externally.
2. as an object that have pre-defined processing function and can iterate and process files internally.
For example, we can implement an iterator that iterate over files and add a comment to every first line.
Weather you use this as an iterator or as an object, all file paths will be processed via the process_file() function.
"""
# type of sources you can add
class SourceTypes:
# return just files.
FilesOnly = {"ret_files": True, "ret_folders": False}
# return just folders.
FoldersOnly = {"ret_files": False, "ret_folders": True}
# return both.
FilesAndFolders = {"ret_files": True, "ret_folders": True}
# define the default source type
DefaultSourceType = SourceTypes.FilesOnly
# type of filters we can add to the iterator, and how to use them
class FilterType:
# All required filters must match in order for a file to be processed.
# for example, if you have 2 required filters and file only match 1, it will be ignored.
Required = 0
# If file matches at least one Include filter, it will be processed immediately, even if doesn't
# match all required filters. Note: this filter type collide with Exclude; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Include = 1
# If file matches at least one Exclude filter, it will be ignored immediately, even if it
# match all required filters. Note: this filter type collide with Include; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Exclude = 2
# define the default filter type
DefaultFilterType = FilterType.Required
def __init__(self):
"""
Init the iterator.
"""
self.__sources = []
self.__filters = []
def add_source(self, source):
"""
Add a source to this iterator.
:param source: files source, must be an object inheriting from sources.SourceAPI.
"""
self.__sources.append(source)
return self
def add_file(self, filepath):
"""
Add a single file source from path (string).
:param filepath: file path as string. can also be a list of files.
"""
self.add_source(FileSource(filepath))
return self
def add_folder(self, path, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively from path (string).
:param path: folder path.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FolderSource(path, depth, **source_type))
return self
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively, with a regex filter on directories.
:param regex: regex string to filter folders by.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
return self
def add_filter(self, files_filter, filter_type=DefaultFilterType):
"""
Add a files filter to this iterator.
For a file to be processed, it must match ALL filters, eg they are added with ADD, not OR.
:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
"""
self.__filters.append((files_filter, filter_type))
return self
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
"""
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
"""
self.add_filter(FilterPattern(pattern), filter_type)
return self
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
"""
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
"""
self.add_filter(FilterRegex(regex_expression), filter_type)
return self
def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
"""
Add a files filter by extensions to this iterator.
:param extensions: single extension or list of extensions to filter by.
for example: ["py", "js", "cpp", ...]
"""
self.add_filter(FilterExtension(extensions), filter_type)
return self
def __iter__(self):
"""
Return self as iterator.
"""
return self.next()
def get_all(self):
"""
return all files in this iterator as list.
"""
return [x for x in iter(self)]
def process_all(self):
"""
Iterate internally over all files and call process_file().
Use this function if you want to use this iterator with pre-defined processing function, and not
for external iteration.
"""
for _ in self.next():
pass
def dry_run(self):
"""
Iterate over all files and just print them.
This will not call "process_file()", this will only fetch files from all sources
and apply filters on them.
"""
for f in self.next(dryrun=True):
print f
def next(self, dryrun=False):
"""
Iterate over files in all sources.
Use this if you want to iterate files externally.
:param dryrun: if true, will only return all filenames instead of processing them, eg will not
call "process_file" at all, and just show all the files it will scan.
"""
# call the start hook
self.on_start(dryrun)
# store current dir
curr_dir = ""
# iterate over sources
for src in self.__sources:
# call the start_source hook
self.on_start_source(src, dryrun)
# iterate over files
for filename in src.next():
# make sure file pass filters
if not self.match_filters(filename):
continue
# get curr dir to call the directory-enter hook
new_curr_dir = os.path.dirname(filename)
if new_curr_dir != curr_dir:
self.on_enter_dir(new_curr_dir, dryrun)
curr_dir = new_curr_dir
# process file
curr = self.process_file(filename, dryrun)
# if after process we still want to return file for external iteration, return it
if curr is not None:
yield curr
# call the end-source hook
self.on_end_source(src, dryrun)
# call the end iteration hook and raise stop iteration exception
self.on_end(dryrun)
raise StopIteration
def on_enter_dir(self, directory, dryrun):
"""
A hook you can implement to be called when iteration changes directory (called when entered / exit
directories while scanning)
:param directory: the directory we are now in.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_start_source(self, source, dryrun):
"""
A hook you can implement to be called when a new source is starting to be processed.
:param source: the source we started processing.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_end_source(self, source, dryrun):
"""
A hook you can implement to be called when we finish iterating a source.
:param source: the source we finished processing.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_start(self, dryrun):
"""
A hook you can implement to be called when an iteration starts.
For example, you can use this to open output file, log, etc.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_end(self, dryrun):
"""
A hook you can implement to be called when an iteration ends.
For example, you can use this to close output file, log, etc.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def match_filters(self, path):
"""
Get filename and return True if file pass all filters and should be processed.
:param path: path to check.
:return: True if pass filters, false otherwise.
"""
# indicate if all required filters were matched
all_required_match = True
# iterate over filters to match files
for filt, ftype in self.__filters:
# handle "Required" filters:
if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
all_required_match = False
# handle "Include" filters:
elif ftype == self.FilterType.Include and filt.match(path):
return True
# handle "Exclude" filters:
elif ftype == self.FilterType.Exclude and filt.match(path):
return False
# if got here it means we processed all filters, and no include/exclude filter was matched.
# return if all required were matched
return all_required_match
def process_file(self, path, dryrun):
"""
This function is called for every file processed.
When using this class as an iterator, this function can return None to skip files, or
process their names before returned.
:param path: current file path.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
:return: should return filename, or None if you want to omit this file from the iteration loop.
"""
return path
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filtered_folder
|
python
|
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
return self
|
Add a folder source to scan recursively, with a regex filter on directories.
:param regex: regex string to filter folders by.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L110-L119
| null |
class FilesIterator(object):
"""
Base class to iterate over file sources and perform pre-defined actions on them.
This class can be used in two ways:
1. as an iterator, if you want to iterate files and use them externally.
2. as an object that have pre-defined processing function and can iterate and process files internally.
For example, we can implement an iterator that iterate over files and add a comment to every first line.
Weather you use this as an iterator or as an object, all file paths will be processed via the process_file() function.
"""
# type of sources you can add
class SourceTypes:
# return just files.
FilesOnly = {"ret_files": True, "ret_folders": False}
# return just folders.
FoldersOnly = {"ret_files": False, "ret_folders": True}
# return both.
FilesAndFolders = {"ret_files": True, "ret_folders": True}
# define the default source type
DefaultSourceType = SourceTypes.FilesOnly
# type of filters we can add to the iterator, and how to use them
class FilterType:
# All required filters must match in order for a file to be processed.
# for example, if you have 2 required filters and file only match 1, it will be ignored.
Required = 0
# If file matches at least one Include filter, it will be processed immediately, even if doesn't
# match all required filters. Note: this filter type collide with Exclude; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Include = 1
# If file matches at least one Exclude filter, it will be ignored immediately, even if it
# match all required filters. Note: this filter type collide with Include; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Exclude = 2
# define the default filter type
DefaultFilterType = FilterType.Required
def __init__(self):
"""
Init the iterator.
"""
self.__sources = []
self.__filters = []
def add_source(self, source):
"""
Add a source to this iterator.
:param source: files source, must be an object inheriting from sources.SourceAPI.
"""
self.__sources.append(source)
return self
def add_file(self, filepath):
"""
Add a single file source from path (string).
:param filepath: file path as string. can also be a list of files.
"""
self.add_source(FileSource(filepath))
return self
def add_folder(self, path, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively from path (string).
:param path: folder path.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FolderSource(path, depth, **source_type))
return self
def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
"""
Add a recursive folder scan using a linux-style patterns.
:param pattern: pattern or list of patterns to match.
:param root: root to start from (default to '.')
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(PatternSource(pattern, root, depth, **source_type))
return self
def add_filter(self, files_filter, filter_type=DefaultFilterType):
"""
Add a files filter to this iterator.
For a file to be processed, it must match ALL filters, eg they are added with ADD, not OR.
:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
"""
self.__filters.append((files_filter, filter_type))
return self
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
"""
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
"""
self.add_filter(FilterPattern(pattern), filter_type)
return self
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
"""
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
"""
self.add_filter(FilterRegex(regex_expression), filter_type)
return self
def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
"""
Add a files filter by extensions to this iterator.
:param extensions: single extension or list of extensions to filter by.
for example: ["py", "js", "cpp", ...]
"""
self.add_filter(FilterExtension(extensions), filter_type)
return self
def __iter__(self):
"""
Return self as iterator.
"""
return self.next()
def get_all(self):
"""
return all files in this iterator as list.
"""
return [x for x in iter(self)]
def process_all(self):
"""
Iterate internally over all files and call process_file().
Use this function if you want to use this iterator with pre-defined processing function, and not
for external iteration.
"""
for _ in self.next():
pass
def dry_run(self):
"""
Iterate over all files and just print them.
This will not call "process_file()", this will only fetch files from all sources
and apply filters on them.
"""
for f in self.next(dryrun=True):
print f
def next(self, dryrun=False):
"""
Iterate over files in all sources.
Use this if you want to iterate files externally.
:param dryrun: if true, will only return all filenames instead of processing them, eg will not
call "process_file" at all, and just show all the files it will scan.
"""
# call the start hook
self.on_start(dryrun)
# store current dir
curr_dir = ""
# iterate over sources
for src in self.__sources:
# call the start_source hook
self.on_start_source(src, dryrun)
# iterate over files
for filename in src.next():
# make sure file pass filters
if not self.match_filters(filename):
continue
# get curr dir to call the directory-enter hook
new_curr_dir = os.path.dirname(filename)
if new_curr_dir != curr_dir:
self.on_enter_dir(new_curr_dir, dryrun)
curr_dir = new_curr_dir
# process file
curr = self.process_file(filename, dryrun)
# if after process we still want to return file for external iteration, return it
if curr is not None:
yield curr
# call the end-source hook
self.on_end_source(src, dryrun)
# call the end iteration hook and raise stop iteration exception
self.on_end(dryrun)
raise StopIteration
def on_enter_dir(self, directory, dryrun):
"""
A hook you can implement to be called when iteration changes directory (called when entered / exit
directories while scanning)
:param directory: the directory we are now in.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_start_source(self, source, dryrun):
"""
A hook you can implement to be called when a new source is starting to be processed.
:param source: the source we started processing.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_end_source(self, source, dryrun):
"""
A hook you can implement to be called when we finish iterating a source.
:param source: the source we finished processing.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_start(self, dryrun):
"""
A hook you can implement to be called when an iteration starts.
For example, you can use this to open output file, log, etc.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_end(self, dryrun):
"""
A hook you can implement to be called when an iteration ends.
For example, you can use this to close output file, log, etc.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def match_filters(self, path):
"""
Get filename and return True if file pass all filters and should be processed.
:param path: path to check.
:return: True if pass filters, false otherwise.
"""
# indicate if all required filters were matched
all_required_match = True
# iterate over filters to match files
for filt, ftype in self.__filters:
# handle "Required" filters:
if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
all_required_match = False
# handle "Include" filters:
elif ftype == self.FilterType.Include and filt.match(path):
return True
# handle "Exclude" filters:
elif ftype == self.FilterType.Exclude and filt.match(path):
return False
# if got here it means we processed all filters, and no include/exclude filter was matched.
# return if all required were matched
return all_required_match
def process_file(self, path, dryrun):
"""
This function is called for every file processed.
When using this class as an iterator, this function can return None to skip files, or
process their names before returned.
:param path: current file path.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
:return: should return filename, or None if you want to omit this file from the iteration loop.
"""
return path
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filter
|
python
|
def add_filter(self, files_filter, filter_type=DefaultFilterType):
self.__filters.append((files_filter, filter_type))
return self
|
Add a files filter to this iterator.
For a file to be processed, it must match ALL filters, eg they are added with ADD, not OR.
:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L121-L130
| null |
class FilesIterator(object):
"""
Base class to iterate over file sources and perform pre-defined actions on them.
This class can be used in two ways:
1. as an iterator, if you want to iterate files and use them externally.
2. as an object that have pre-defined processing function and can iterate and process files internally.
For example, we can implement an iterator that iterate over files and add a comment to every first line.
Weather you use this as an iterator or as an object, all file paths will be processed via the process_file() function.
"""
# type of sources you can add
class SourceTypes:
# return just files.
FilesOnly = {"ret_files": True, "ret_folders": False}
# return just folders.
FoldersOnly = {"ret_files": False, "ret_folders": True}
# return both.
FilesAndFolders = {"ret_files": True, "ret_folders": True}
# define the default source type
DefaultSourceType = SourceTypes.FilesOnly
# type of filters we can add to the iterator, and how to use them
class FilterType:
# All required filters must match in order for a file to be processed.
# for example, if you have 2 required filters and file only match 1, it will be ignored.
Required = 0
# If file matches at least one Include filter, it will be processed immediately, even if doesn't
# match all required filters. Note: this filter type collide with Exclude; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Include = 1
# If file matches at least one Exclude filter, it will be ignored immediately, even if it
# match all required filters. Note: this filter type collide with Include; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Exclude = 2
# define the default filter type
DefaultFilterType = FilterType.Required
def __init__(self):
"""
Init the iterator.
"""
self.__sources = []
self.__filters = []
def add_source(self, source):
"""
Add a source to this iterator.
:param source: files source, must be an object inheriting from sources.SourceAPI.
"""
self.__sources.append(source)
return self
def add_file(self, filepath):
"""
Add a single file source from path (string).
:param filepath: file path as string. can also be a list of files.
"""
self.add_source(FileSource(filepath))
return self
def add_folder(self, path, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively from path (string).
:param path: folder path.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FolderSource(path, depth, **source_type))
return self
def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
"""
Add a recursive folder scan using a linux-style patterns.
:param pattern: pattern or list of patterns to match.
:param root: root to start from (default to '.')
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(PatternSource(pattern, root, depth, **source_type))
return self
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively, with a regex filter on directories.
:param regex: regex string to filter folders by.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
return self
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
"""
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
"""
self.add_filter(FilterPattern(pattern), filter_type)
return self
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
"""
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
"""
self.add_filter(FilterRegex(regex_expression), filter_type)
return self
def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
"""
Add a files filter by extensions to this iterator.
:param extensions: single extension or list of extensions to filter by.
for example: ["py", "js", "cpp", ...]
"""
self.add_filter(FilterExtension(extensions), filter_type)
return self
def __iter__(self):
"""
Return self as iterator.
"""
return self.next()
def get_all(self):
"""
return all files in this iterator as list.
"""
return [x for x in iter(self)]
def process_all(self):
"""
Iterate internally over all files and call process_file().
Use this function if you want to use this iterator with pre-defined processing function, and not
for external iteration.
"""
for _ in self.next():
pass
def dry_run(self):
"""
Iterate over all files and just print them.
This will not call "process_file()", this will only fetch files from all sources
and apply filters on them.
"""
for f in self.next(dryrun=True):
print f
def next(self, dryrun=False):
"""
Iterate over files in all sources.
Use this if you want to iterate files externally.
:param dryrun: if true, will only return all filenames instead of processing them, eg will not
call "process_file" at all, and just show all the files it will scan.
"""
# call the start hook
self.on_start(dryrun)
# store current dir
curr_dir = ""
# iterate over sources
for src in self.__sources:
# call the start_source hook
self.on_start_source(src, dryrun)
# iterate over files
for filename in src.next():
# make sure file pass filters
if not self.match_filters(filename):
continue
# get curr dir to call the directory-enter hook
new_curr_dir = os.path.dirname(filename)
if new_curr_dir != curr_dir:
self.on_enter_dir(new_curr_dir, dryrun)
curr_dir = new_curr_dir
# process file
curr = self.process_file(filename, dryrun)
# if after process we still want to return file for external iteration, return it
if curr is not None:
yield curr
# call the end-source hook
self.on_end_source(src, dryrun)
# call the end iteration hook and raise stop iteration exception
self.on_end(dryrun)
raise StopIteration
def on_enter_dir(self, directory, dryrun):
"""
A hook you can implement to be called when iteration changes directory (called when entered / exit
directories while scanning)
:param directory: the directory we are now in.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_start_source(self, source, dryrun):
"""
A hook you can implement to be called when a new source is starting to be processed.
:param source: the source we started processing.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_end_source(self, source, dryrun):
"""
A hook you can implement to be called when we finish iterating a source.
:param source: the source we finished processing.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_start(self, dryrun):
"""
A hook you can implement to be called when an iteration starts.
For example, you can use this to open output file, log, etc.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def on_end(self, dryrun):
"""
A hook you can implement to be called when an iteration ends.
For example, you can use this to close output file, log, etc.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
"""
pass
def match_filters(self, path):
"""
Get filename and return True if file pass all filters and should be processed.
:param path: path to check.
:return: True if pass filters, false otherwise.
"""
# indicate if all required filters were matched
all_required_match = True
# iterate over filters to match files
for filt, ftype in self.__filters:
# handle "Required" filters:
if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
all_required_match = False
# handle "Include" filters:
elif ftype == self.FilterType.Include and filt.match(path):
return True
# handle "Exclude" filters:
elif ftype == self.FilterType.Exclude and filt.match(path):
return False
# if got here it means we processed all filters, and no include/exclude filter was matched.
# return if all required were matched
return all_required_match
def process_file(self, path, dryrun):
"""
This function is called for every file processed.
When using this class as an iterator, this function can return None to skip files, or
process their names before returned.
:param path: current file path.
:param dryrun: indicate if we are currently in dry-run mode and should not change files.
:return: should return filename, or None if you want to omit this file from the iteration loop.
"""
return path
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filter_by_pattern
|
python
|
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
self.add_filter(FilterPattern(pattern), filter_type)
return self
|
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L132-L139
| null |
class FilesIterator(object):
    """
    Base class to iterate over file sources and perform pre-defined actions on them.

    This class can be used in two ways:
    1. as an iterator, if you want to iterate files and use them externally.
    2. as an object that has a pre-defined processing function and can iterate and process files internally.
       For example, we can implement an iterator that iterates over files and adds a comment to every first line.

    Whether you use this as an iterator or as an object, all file paths will be processed via the
    process_file() function.
    """

    # type of sources you can add
    class SourceTypes:
        # return just files.
        FilesOnly = {"ret_files": True, "ret_folders": False}
        # return just folders.
        FoldersOnly = {"ret_files": False, "ret_folders": True}
        # return both.
        FilesAndFolders = {"ret_files": True, "ret_folders": True}

    # define the default source type
    DefaultSourceType = SourceTypes.FilesOnly

    # type of filters we can add to the iterator, and how to use them
    class FilterType:
        # All required filters must match in order for a file to be processed.
        # for example, if you have 2 required filters and a file only matches 1, it will be ignored.
        Required = 0
        # If a file matches at least one Include filter, it will be processed immediately, even if it doesn't
        # match all required filters. Note: this filter type collides with Exclude; the first filter to match
        # will determine if the file will be processed or not. Order of filters is meaningful.
        Include = 1
        # If a file matches at least one Exclude filter, it will be ignored immediately, even if it
        # matches all required filters. Note: this filter type collides with Include; the first filter to match
        # will determine if the file will be processed or not. Order of filters is meaningful.
        Exclude = 2

    # define the default filter type
    DefaultFilterType = FilterType.Required

    def __init__(self):
        """
        Init the iterator with empty source and filter lists.
        """
        self.__sources = []
        self.__filters = []

    def add_source(self, source):
        """
        Add a source to this iterator.
        :param source: files source, must be an object inheriting from sources.SourceAPI.
        :return: self, to allow call chaining.
        """
        self.__sources.append(source)
        return self

    def add_file(self, filepath):
        """
        Add a single file source from path (string).
        :param filepath: file path as string. can also be a list of files.
        :return: self, to allow call chaining.
        """
        self.add_source(FileSource(filepath))
        return self

    def add_folder(self, path, depth=None, source_type=DefaultSourceType):
        """
        Add a folder source to scan recursively from path (string).
        :param path: folder path.
        :param depth: if provided will be depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both (see SourceTypes).
        :return: self, to allow call chaining.
        """
        self.add_source(FolderSource(path, depth, **source_type))
        return self

    def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
        """
        Add a recursive folder scan using linux-style patterns.
        :param pattern: pattern or list of patterns to match.
        :param root: root to start from (default to '.')
        :param depth: if provided will be depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both (see SourceTypes).
        :return: self, to allow call chaining.
        """
        self.add_source(PatternSource(pattern, root, depth, **source_type))
        return self

    def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
        """
        Add a folder source to scan recursively, with a regex filter on directories.
        :param path: folder root to scan.
        :param regex: regex string to filter folders by.
        :param depth: if provided will be depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both (see SourceTypes).
        :return: self, to allow call chaining.
        """
        self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
        return self

    def add_filter(self, files_filter, filter_type=DefaultFilterType):
        """
        Add a files filter to this iterator.
        For a file to be processed, it must match ALL Required filters, i.e. they are combined
        with AND, not OR (Include/Exclude filters short-circuit instead; see FilterType and
        match_filters() for the exact semantics).
        :param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
        :param filter_type: filter behavior, see FilterType for details.
        :return: self, to allow call chaining.
        """
        self.__filters.append((files_filter, filter_type))
        return self

    def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
        """
        Add a files filter by regex to this iterator.
        :param regex_expression: regex string to apply.
        :param filter_type: filter behavior, see FilterType for details.
        :return: self, to allow call chaining.
        """
        self.add_filter(FilterRegex(regex_expression), filter_type)
        return self

    def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
        """
        Add a files filter by extensions to this iterator.
        :param extensions: single extension or list of extensions to filter by.
                           for example: ["py", "js", "cpp", ...]
        :param filter_type: filter behavior, see FilterType for details.
        :return: self, to allow call chaining.
        """
        self.add_filter(FilterExtension(extensions), filter_type)
        return self

    def __iter__(self):
        """
        Return an iterator over all files.
        NOTE(review): despite the original wording, this returns the generator produced
        by next(), not self.
        """
        return self.next()

    def get_all(self):
        """
        Return all files in this iterator as a list.
        """
        return [x for x in iter(self)]

    def process_all(self):
        """
        Iterate internally over all files and call process_file().
        Use this function if you want to use this iterator with a pre-defined processing function,
        and not for external iteration.
        """
        for _ in self.next():
            pass

    def dry_run(self):
        """
        Iterate over all files and just print them.
        NOTE(review): process_file() IS still invoked by next() during a dry run, just with
        dryrun=True, so implementations are expected to avoid side effects in that mode.
        """
        for f in self.next(dryrun=True):
            # Python 2 print statement (this module targets Python 2).
            print f

    def next(self, dryrun=False):
        """
        Iterate over files in all sources.
        Use this if you want to iterate files externally.
        :param dryrun: if true, forwarded to process_file() so implementations can skip
                       modifying files. Note that process_file() is still called.
        """
        # call the start hook
        self.on_start(dryrun)
        # store current dir
        curr_dir = ""
        # iterate over sources
        for src in self.__sources:
            # call the start_source hook
            self.on_start_source(src, dryrun)
            # iterate over files
            for filename in src.next():
                # make sure file passes filters
                if not self.match_filters(filename):
                    continue
                # get curr dir to call the directory-enter hook
                new_curr_dir = os.path.dirname(filename)
                if new_curr_dir != curr_dir:
                    self.on_enter_dir(new_curr_dir, dryrun)
                    curr_dir = new_curr_dir
                # process file
                curr = self.process_file(filename, dryrun)
                # if after process we still want to return file for external iteration, return it
                if curr is not None:
                    yield curr
            # call the end-source hook
            self.on_end_source(src, dryrun)
        # call the end iteration hook and raise stop iteration exception
        self.on_end(dryrun)
        # NOTE(review): explicitly raising StopIteration inside a generator becomes a
        # RuntimeError under PEP 479 (Python 3.7+); it is legal in Python 2, which this
        # module targets. A bare return would be the portable equivalent.
        raise StopIteration

    def on_enter_dir(self, directory, dryrun):
        """
        A hook you can implement to be called when iteration changes directory (called when
        entering/exiting directories while scanning).
        :param directory: the directory we are now in.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def on_start_source(self, source, dryrun):
        """
        A hook you can implement to be called when a new source starts being processed.
        :param source: the source we started processing.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def on_end_source(self, source, dryrun):
        """
        A hook you can implement to be called when we finish iterating a source.
        :param source: the source we finished processing.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def on_start(self, dryrun):
        """
        A hook you can implement to be called when an iteration starts.
        For example, you can use this to open an output file, log, etc.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def on_end(self, dryrun):
        """
        A hook you can implement to be called when an iteration ends.
        For example, you can use this to close an output file, log, etc.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def match_filters(self, path):
        """
        Get a filename and return True if the file passes all filters and should be processed.
        :param path: path to check.
        :return: True if it passes filters, false otherwise.
        """
        # indicate if all required filters were matched
        all_required_match = True
        # iterate over filters to match files
        for filt, ftype in self.__filters:
            # handle "Required" filters:
            if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
                all_required_match = False
            # handle "Include" filters:
            elif ftype == self.FilterType.Include and filt.match(path):
                return True
            # handle "Exclude" filters:
            elif ftype == self.FilterType.Exclude and filt.match(path):
                return False
        # if got here it means we processed all filters, and no include/exclude filter was matched.
        # return if all required were matched
        return all_required_match

    def process_file(self, path, dryrun):
        """
        This function is called for every file processed.
        When using this class as an iterator, this function can return None to skip files, or
        process their names before they are returned.
        :param path: current file path.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        :return: should return the filename, or None if you want to omit this file from the iteration loop.
        """
        return path
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filter_by_regex
|
python
|
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
self.add_filter(FilterRegex(regex_expression), filter_type)
return self
|
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L141-L148
| null |
class FilesIterator(object):
    """
    Base class to iterate over file sources and perform pre-defined actions on them.

    This class can be used in two ways:
    1. as an iterator, if you want to iterate files and use them externally.
    2. as an object that has a pre-defined processing function and can iterate and process files internally.
       For example, we can implement an iterator that iterates over files and adds a comment to every first line.

    Whether you use this as an iterator or as an object, all file paths will be processed via the
    process_file() function.
    """

    # type of sources you can add
    class SourceTypes:
        # return just files.
        FilesOnly = {"ret_files": True, "ret_folders": False}
        # return just folders.
        FoldersOnly = {"ret_files": False, "ret_folders": True}
        # return both.
        FilesAndFolders = {"ret_files": True, "ret_folders": True}

    # define the default source type
    DefaultSourceType = SourceTypes.FilesOnly

    # type of filters we can add to the iterator, and how to use them
    class FilterType:
        # All required filters must match in order for a file to be processed.
        # for example, if you have 2 required filters and a file only matches 1, it will be ignored.
        Required = 0
        # If a file matches at least one Include filter, it will be processed immediately, even if it doesn't
        # match all required filters. Note: this filter type collides with Exclude; the first filter to match
        # will determine if the file will be processed or not. Order of filters is meaningful.
        Include = 1
        # If a file matches at least one Exclude filter, it will be ignored immediately, even if it
        # matches all required filters. Note: this filter type collides with Include; the first filter to match
        # will determine if the file will be processed or not. Order of filters is meaningful.
        Exclude = 2

    # define the default filter type
    DefaultFilterType = FilterType.Required

    def __init__(self):
        """
        Init the iterator with empty source and filter lists.
        """
        self.__sources = []
        self.__filters = []

    def add_source(self, source):
        """
        Add a source to this iterator.
        :param source: files source, must be an object inheriting from sources.SourceAPI.
        :return: self, to allow call chaining.
        """
        self.__sources.append(source)
        return self

    def add_file(self, filepath):
        """
        Add a single file source from path (string).
        :param filepath: file path as string. can also be a list of files.
        :return: self, to allow call chaining.
        """
        self.add_source(FileSource(filepath))
        return self

    def add_folder(self, path, depth=None, source_type=DefaultSourceType):
        """
        Add a folder source to scan recursively from path (string).
        :param path: folder path.
        :param depth: if provided will be depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both (see SourceTypes).
        :return: self, to allow call chaining.
        """
        self.add_source(FolderSource(path, depth, **source_type))
        return self

    def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
        """
        Add a recursive folder scan using linux-style patterns.
        :param pattern: pattern or list of patterns to match.
        :param root: root to start from (default to '.')
        :param depth: if provided will be depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both (see SourceTypes).
        :return: self, to allow call chaining.
        """
        self.add_source(PatternSource(pattern, root, depth, **source_type))
        return self

    def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
        """
        Add a folder source to scan recursively, with a regex filter on directories.
        :param path: folder root to scan.
        :param regex: regex string to filter folders by.
        :param depth: if provided will be depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both (see SourceTypes).
        :return: self, to allow call chaining.
        """
        self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
        return self

    def add_filter(self, files_filter, filter_type=DefaultFilterType):
        """
        Add a files filter to this iterator.
        For a file to be processed, it must match ALL Required filters, i.e. they are combined
        with AND, not OR (Include/Exclude filters short-circuit instead; see FilterType and
        match_filters() for the exact semantics).
        :param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
        :param filter_type: filter behavior, see FilterType for details.
        :return: self, to allow call chaining.
        """
        self.__filters.append((files_filter, filter_type))
        return self

    def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
        """
        Add a files filter by linux-style pattern to this iterator.
        :param pattern: linux-style files pattern (or list of patterns)
        :param filter_type: filter behavior, see FilterType for details.
        :return: self, to allow call chaining.
        """
        self.add_filter(FilterPattern(pattern), filter_type)
        return self

    def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
        """
        Add a files filter by extensions to this iterator.
        :param extensions: single extension or list of extensions to filter by.
                           for example: ["py", "js", "cpp", ...]
        :param filter_type: filter behavior, see FilterType for details.
        :return: self, to allow call chaining.
        """
        self.add_filter(FilterExtension(extensions), filter_type)
        return self

    def __iter__(self):
        """
        Return an iterator over all files.
        NOTE(review): despite the original wording, this returns the generator produced
        by next(), not self.
        """
        return self.next()

    def get_all(self):
        """
        Return all files in this iterator as a list.
        """
        return [x for x in iter(self)]

    def process_all(self):
        """
        Iterate internally over all files and call process_file().
        Use this function if you want to use this iterator with a pre-defined processing function,
        and not for external iteration.
        """
        for _ in self.next():
            pass

    def dry_run(self):
        """
        Iterate over all files and just print them.
        NOTE(review): process_file() IS still invoked by next() during a dry run, just with
        dryrun=True, so implementations are expected to avoid side effects in that mode.
        """
        for f in self.next(dryrun=True):
            # Python 2 print statement (this module targets Python 2).
            print f

    def next(self, dryrun=False):
        """
        Iterate over files in all sources.
        Use this if you want to iterate files externally.
        :param dryrun: if true, forwarded to process_file() so implementations can skip
                       modifying files. Note that process_file() is still called.
        """
        # call the start hook
        self.on_start(dryrun)
        # store current dir
        curr_dir = ""
        # iterate over sources
        for src in self.__sources:
            # call the start_source hook
            self.on_start_source(src, dryrun)
            # iterate over files
            for filename in src.next():
                # make sure file passes filters
                if not self.match_filters(filename):
                    continue
                # get curr dir to call the directory-enter hook
                new_curr_dir = os.path.dirname(filename)
                if new_curr_dir != curr_dir:
                    self.on_enter_dir(new_curr_dir, dryrun)
                    curr_dir = new_curr_dir
                # process file
                curr = self.process_file(filename, dryrun)
                # if after process we still want to return file for external iteration, return it
                if curr is not None:
                    yield curr
            # call the end-source hook
            self.on_end_source(src, dryrun)
        # call the end iteration hook and raise stop iteration exception
        self.on_end(dryrun)
        # NOTE(review): explicitly raising StopIteration inside a generator becomes a
        # RuntimeError under PEP 479 (Python 3.7+); it is legal in Python 2, which this
        # module targets. A bare return would be the portable equivalent.
        raise StopIteration

    def on_enter_dir(self, directory, dryrun):
        """
        A hook you can implement to be called when iteration changes directory (called when
        entering/exiting directories while scanning).
        :param directory: the directory we are now in.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def on_start_source(self, source, dryrun):
        """
        A hook you can implement to be called when a new source starts being processed.
        :param source: the source we started processing.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def on_end_source(self, source, dryrun):
        """
        A hook you can implement to be called when we finish iterating a source.
        :param source: the source we finished processing.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def on_start(self, dryrun):
        """
        A hook you can implement to be called when an iteration starts.
        For example, you can use this to open an output file, log, etc.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def on_end(self, dryrun):
        """
        A hook you can implement to be called when an iteration ends.
        For example, you can use this to close an output file, log, etc.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        """
        pass

    def match_filters(self, path):
        """
        Get a filename and return True if the file passes all filters and should be processed.
        :param path: path to check.
        :return: True if it passes filters, false otherwise.
        """
        # indicate if all required filters were matched
        all_required_match = True
        # iterate over filters to match files
        for filt, ftype in self.__filters:
            # handle "Required" filters:
            if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
                all_required_match = False
            # handle "Include" filters:
            elif ftype == self.FilterType.Include and filt.match(path):
                return True
            # handle "Exclude" filters:
            elif ftype == self.FilterType.Exclude and filt.match(path):
                return False
        # if got here it means we processed all filters, and no include/exclude filter was matched.
        # return if all required were matched
        return all_required_match

    def process_file(self, path, dryrun):
        """
        This function is called for every file processed.
        When using this class as an iterator, this function can return None to skip files, or
        process their names before they are returned.
        :param path: current file path.
        :param dryrun: indicate if we are currently in dry-run mode and should not change files.
        :return: should return the filename, or None if you want to omit this file from the iteration loop.
        """
        return path
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.add_filter_by_extension
|
python
|
def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
self.add_filter(FilterExtension(extensions), filter_type)
return self
|
Add a files filter by extensions to this iterator.
:param extensions: single extension or list of extensions to filter by.
for example: ["py", "js", "cpp", ...]
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L150-L158
| null |
class FilesIterator(object):
"""
Base class to iterate over file sources and perform pre-defined actions on them.
This class can be used in two ways:
1. as an iterator, if you want to iterate files and use them externally.
2. as an object that have pre-defined processing function and can iterate and process files internally.
For example, we can implement an iterator that iterate over files and add a comment to every first line.
Weather you use this as an iterator or as an object, all file paths will be processed via the process_file() function.
"""
# type of sources you can add
class SourceTypes:
# return just files.
FilesOnly = {"ret_files": True, "ret_folders": False}
# return just folders.
FoldersOnly = {"ret_files": False, "ret_folders": True}
# return both.
FilesAndFolders = {"ret_files": True, "ret_folders": True}
# define the default source type
DefaultSourceType = SourceTypes.FilesOnly
# type of filters we can add to the iterator, and how to use them
class FilterType:
# All required filters must match in order for a file to be processed.
# for example, if you have 2 required filters and file only match 1, it will be ignored.
Required = 0
# If file matches at least one Include filter, it will be processed immediately, even if doesn't
# match all required filters. Note: this filter type collide with Exclude; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Include = 1
# If file matches at least one Exclude filter, it will be ignored immediately, even if it
# match all required filters. Note: this filter type collide with Include; first filter to match
# will determine if the file will be processed or not. Order of filters is meaningful.
Exclude = 2
# define the default filter type
DefaultFilterType = FilterType.Required
def __init__(self):
"""
Init the iterator.
"""
self.__sources = []
self.__filters = []
def add_source(self, source):
"""
Add a source to this iterator.
:param source: files source, must be an object inheriting from sources.SourceAPI.
"""
self.__sources.append(source)
return self
def add_file(self, filepath):
"""
Add a single file source from path (string).
:param filepath: file path as string. can also be a list of files.
"""
self.add_source(FileSource(filepath))
return self
def add_folder(self, path, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively from path (string).
:param path: folder path.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FolderSource(path, depth, **source_type))
return self
def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
"""
Add a recursive folder scan using a linux-style patterns.
:param pattern: pattern or list of patterns to match.
:param root: root to start from (default to '.')
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(PatternSource(pattern, root, depth, **source_type))
return self
def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
"""
Add a folder source to scan recursively, with a regex filter on directories.
:param regex: regex string to filter folders by.
:param depth: if provided will be depth limit. 0 = first level only.
:param source_type: what to return; files only, folders only, or both.
"""
self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
return self
def add_filter(self, files_filter, filter_type=DefaultFilterType):
"""
Add a files filter to this iterator.
For a file to be processed, it must match ALL filters, eg they are added with ADD, not OR.
:param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
:param filter_type: filter behavior, see FilterType for details.
"""
self.__filters.append((files_filter, filter_type))
return self
def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
"""
Add a files filter by linux-style pattern to this iterator.
:param pattern: linux-style files pattern (or list of patterns)
"""
self.add_filter(FilterPattern(pattern), filter_type)
return self
def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
"""
Add a files filter by regex to this iterator.
:param regex_expression: regex string to apply.
"""
self.add_filter(FilterRegex(regex_expression), filter_type)
return self
def __iter__(self):
"""
Return self as iterator.
"""
return self.next()
def get_all(self):
"""
return all files in this iterator as list.
"""
return [x for x in iter(self)]
def process_all(self):
"""
Iterate internally over all files and call process_file().
Use this function if you want to use this iterator with pre-defined processing function, and not
for external iteration.
"""
for _ in self.next():
pass
def dry_run(self):
"""
Iterate over all files and just print them.
This will not call "process_file()", this will only fetch files from all sources
and apply filters on them.
"""
for f in self.next(dryrun=True):
print f
def next(self, dryrun=False):
"""
Iterate over files in all sources.
Use this if you want to iterate files externally.
:param dryrun: if true, will only return all filenames instead of processing them, eg will not
call "process_file" at all, and just show all the files it will scan.
"""
# call the start hook
self.on_start(dryrun)
# store current dir
curr_dir = ""
# iterate over sources
for src in self.__sources:
# call the start_source hook
self.on_start_source(src, dryrun)
# iterate over files
for filename in src.next():
# make sure file pass filters
if not self.match_filters(filename):
continue
# get curr dir to call the directory-enter hook
new_curr_dir = os.path.dirname(filename)
if new_curr_dir != curr_dir:
self.on_enter_dir(new_curr_dir, dryrun)
curr_dir = new_curr_dir
# process file
curr = self.process_file(filename, dryrun)
# if after process we still want to return file for external iteration, return it
if curr is not None:
yield curr
# call the end-source hook
self.on_end_source(src, dryrun)
# call the end iteration hook and raise stop iteration exception
self.on_end(dryrun)
raise StopIteration
def on_enter_dir(self, directory, dryrun):
    """
    Hook invoked whenever the scan moves into a different directory.

    Default implementation does nothing; override to react to directory changes.
    :param directory: the directory the scan just entered.
    :param dryrun: True when iterating in dry-run mode (files must not be changed).
    """
    return None
def on_start_source(self, source, dryrun):
    """
    Hook invoked when a new source starts being processed.

    Default implementation does nothing; override as needed.
    :param source: the source whose processing just started.
    :param dryrun: True when iterating in dry-run mode (files must not be changed).
    """
    return None
def on_end_source(self, source, dryrun):
    """
    Hook invoked when a source has been fully iterated.

    Default implementation does nothing; override as needed.
    :param source: the source whose processing just finished.
    :param dryrun: True when iterating in dry-run mode (files must not be changed).
    """
    return None
def on_start(self, dryrun):
    """
    Hook invoked when an iteration starts.

    Default implementation does nothing; override e.g. to open an output
    file or a log.
    :param dryrun: True when iterating in dry-run mode (files must not be changed).
    """
    return None
def on_end(self, dryrun):
    """
    Hook invoked when an iteration ends.

    Default implementation does nothing; override e.g. to close an output
    file or a log.
    :param dryrun: True when iterating in dry-run mode (files must not be changed).
    """
    return None
def match_filters(self, path):
    """
    Decide whether a path passes the configured filters.

    Include/Exclude filters short-circuit: the first one to match decides
    immediately (order of filters is therefore meaningful). When none of
    them decides, the path is accepted only if every Required filter matched.
    :param path: path to test.
    :return: True when the file should be processed, False otherwise.
    """
    required_ok = True
    for candidate, behaviour in self.__filters:
        # "Required": a single failure is enough to reject later, but we
        # keep scanning — an Include/Exclude filter may still short-circuit.
        # (match() is only called while required_ok still holds, exactly
        # like the short-circuit in the original condition chain.)
        if required_ok and behaviour == self.FilterType.Required and not candidate.match(path):
            required_ok = False
        # "Include": first match accepts the file immediately
        elif behaviour == self.FilterType.Include and candidate.match(path):
            return True
        # "Exclude": first match rejects the file immediately
        elif behaviour == self.FilterType.Exclude and candidate.match(path):
            return False
    # no Include/Exclude filter decided; accept iff all Required matched
    return required_ok
def process_file(self, path, dryrun):
    """
    Called once for every file the iteration produces.

    Subclasses may transform the path before it is yielded, or return None
    to drop the file from the iteration entirely. The default is a no-op
    pass-through.
    :param path: current file path.
    :param dryrun: True when iterating in dry-run mode (files must not be changed).
    :return: the (possibly transformed) path, or None to omit this file.
    """
    return path
|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.next
|
python
|
def next(self, dryrun=False):
# call the start hook
self.on_start(dryrun)
# store current dir
curr_dir = ""
# iterate over sources
for src in self.__sources:
# call the start_source hook
self.on_start_source(src, dryrun)
# iterate over files
for filename in src.next():
# make sure file pass filters
if not self.match_filters(filename):
continue
# get curr dir to call the directory-enter hook
new_curr_dir = os.path.dirname(filename)
if new_curr_dir != curr_dir:
self.on_enter_dir(new_curr_dir, dryrun)
curr_dir = new_curr_dir
# process file
curr = self.process_file(filename, dryrun)
# if after process we still want to return file for external iteration, return it
if curr is not None:
yield curr
# call the end-source hook
self.on_end_source(src, dryrun)
# call the end iteration hook and raise stop iteration exception
self.on_end(dryrun)
raise StopIteration
|
Iterate over files in all sources.
Use this if you want to iterate files externally.
:param dryrun: if true, will only return all filenames instead of processing them, eg will not
call "process_file" at all, and just show all the files it will scan.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L190-L236
| null |
class FilesIterator(object):
    """
    Base class to iterate over file sources and perform pre-defined actions on them.

    This class can be used in two ways:
    1. as an iterator, if you want to iterate files and use them externally.
    2. as an object that has a pre-defined processing function and can iterate and process files internally.

    For example, we can implement an iterator that iterates over files and adds a comment to every first line.
    Whether you use this as an iterator or as an object, all file paths will be processed via the
    process_file() function.
    """

    # type of sources you can add
    class SourceTypes:
        # return just files.
        FilesOnly = {"ret_files": True, "ret_folders": False}
        # return just folders.
        FoldersOnly = {"ret_files": False, "ret_folders": True}
        # return both.
        FilesAndFolders = {"ret_files": True, "ret_folders": True}

    # define the default source type
    DefaultSourceType = SourceTypes.FilesOnly

    # type of filters we can add to the iterator, and how to use them
    class FilterType:
        # All required filters must match in order for a file to be processed.
        # For example, if you have 2 required filters and a file only matches 1, it will be ignored.
        Required = 0
        # If a file matches at least one Include filter, it will be processed immediately, even if it
        # doesn't match all required filters. Note: this filter type collides with Exclude; the first
        # filter to match determines if the file is processed or not. Order of filters is meaningful.
        Include = 1
        # If a file matches at least one Exclude filter, it will be ignored immediately, even if it
        # matches all required filters. Note: this filter type collides with Include; the first
        # filter to match determines if the file is processed or not. Order of filters is meaningful.
        Exclude = 2

    # define the default filter type
    DefaultFilterType = FilterType.Required

    def __init__(self):
        """
        Init the iterator with no sources and no filters.
        """
        self.__sources = []
        self.__filters = []

    def add_source(self, source):
        """
        Add a source to this iterator.

        :param source: files source, must be an object inheriting from sources.SourceAPI.
        :return: self, to allow fluent chaining.
        """
        self.__sources.append(source)
        return self

    def add_file(self, filepath):
        """
        Add a single-file source from a path.

        :param filepath: file path as string; can also be a list of files.
        :return: self, to allow fluent chaining.
        """
        self.add_source(FileSource(filepath))
        return self

    def add_folder(self, path, depth=None, source_type=DefaultSourceType):
        """
        Add a folder source to scan recursively from a path.

        :param path: folder path.
        :param depth: if provided, depth limit (0 = first level only).
        :param source_type: what to return; files only, folders only, or both.
        :return: self, to allow fluent chaining.
        """
        self.add_source(FolderSource(path, depth, **source_type))
        return self

    def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
        """
        Add a recursive folder scan using linux-style patterns.

        :param pattern: pattern or list of patterns to match.
        :param root: root to start from (defaults to '.').
        :param depth: if provided, depth limit (0 = first level only).
        :param source_type: what to return; files only, folders only, or both.
        :return: self, to allow fluent chaining.
        """
        self.add_source(PatternSource(pattern, root, depth, **source_type))
        return self

    def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
        """
        Add a folder source to scan recursively, with a regex filter on directories.

        :param path: folder path to scan.
        :param regex: regex string to filter folders by.
        :param depth: if provided, depth limit (0 = first level only).
        :param source_type: what to return; files only, folders only, or both.
        :return: self, to allow fluent chaining.
        """
        self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
        return self

    def add_filter(self, files_filter, filter_type=DefaultFilterType):
        """
        Add a files filter to this iterator.

        For a file to be processed it must satisfy the combination of all
        filters, as described in FilterType.
        :param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
        :param filter_type: filter behaviour, see FilterType for details.
        :return: self, to allow fluent chaining.
        """
        self.__filters.append((files_filter, filter_type))
        return self

    def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
        """
        Add a files filter by linux-style pattern to this iterator.

        :param pattern: linux-style files pattern (or list of patterns).
        :param filter_type: filter behaviour, see FilterType for details.
        :return: self, to allow fluent chaining.
        """
        self.add_filter(FilterPattern(pattern), filter_type)
        return self

    def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
        """
        Add a files filter by regex to this iterator.

        :param regex_expression: regex string to apply.
        :param filter_type: filter behaviour, see FilterType for details.
        :return: self, to allow fluent chaining.
        """
        self.add_filter(FilterRegex(regex_expression), filter_type)
        return self

    def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
        """
        Add a files filter by extensions to this iterator.

        :param extensions: single extension or list of extensions to filter by,
                           for example: ["py", "js", "cpp", ...].
        :param filter_type: filter behaviour, see FilterType for details.
        :return: self, to allow fluent chaining.
        """
        self.add_filter(FilterExtension(extensions), filter_type)
        return self

    def __iter__(self):
        """
        Support the iterator protocol by delegating to next().
        """
        return self.next()

    def next(self, dryrun=False):
        """
        Iterate over the files of all sources, yielding every path that passes
        the configured filters.

        NOTE(review): this method was missing from the class although
        __iter__/get_all/process_all/dry_run all call self.next(); restored
        from the identical implementation shown earlier in this file.
        :param dryrun: if True, filenames are only returned and processing must
                       not change anything; useful to preview the scan.
        """
        # local import keeps this block self-contained (no module-level
        # import section is visible in this file)
        import os

        # iteration-start hook
        self.on_start(dryrun)
        # tracks the directory of the previously accepted file, so the
        # on_enter_dir() hook fires once per directory change
        curr_dir = ""
        # iterate over sources
        for src in self.__sources:
            self.on_start_source(src, dryrun)
            for filename in src.next():
                # skip files that do not pass the filters
                if not self.match_filters(filename):
                    continue
                # fire the directory-enter hook when moving to a new directory
                new_curr_dir = os.path.dirname(filename)
                if new_curr_dir != curr_dir:
                    self.on_enter_dir(new_curr_dir, dryrun)
                    curr_dir = new_curr_dir
                # give process_file() a chance to transform or drop the path
                curr = self.process_file(filename, dryrun)
                if curr is not None:
                    yield curr
            self.on_end_source(src, dryrun)
        # iteration-end hook; a bare return ends the generator cleanly
        # (raising StopIteration here is a RuntimeError under PEP 479)
        self.on_end(dryrun)
        return

    def get_all(self):
        """
        Return all files produced by this iterator as a list.
        """
        # list() over the iterable is the idiomatic equivalent of the
        # manual [x for x in iter(self)] copy
        return list(self)

    def process_all(self):
        """
        Consume the whole iteration internally, invoking process_file() on
        every file.

        Use this when the iterator has a pre-defined processing function and
        no external iteration is needed.
        """
        for _ in self.next():
            pass

    def dry_run(self):
        """
        Iterate over all files and just print their paths.

        This does not perform real processing: it only fetches files from all
        sources, applies the filters, and prints what would be scanned.
        """
        for f in self.next(dryrun=True):
            # print(f) is valid on both Python 2 and Python 3; the original
            # statement form `print f` is a SyntaxError on Python 3
            print(f)

    def on_enter_dir(self, directory, dryrun):
        """
        Hook invoked whenever the scan moves into a different directory.

        Default implementation does nothing; override to react to changes.
        :param directory: the directory the scan just entered.
        :param dryrun: True when iterating in dry-run mode (files must not be changed).
        """
        pass

    def on_start_source(self, source, dryrun):
        """
        Hook invoked when a new source starts being processed.

        :param source: the source whose processing just started.
        :param dryrun: True when iterating in dry-run mode (files must not be changed).
        """
        pass

    def on_end_source(self, source, dryrun):
        """
        Hook invoked when a source has been fully iterated.

        :param source: the source whose processing just finished.
        :param dryrun: True when iterating in dry-run mode (files must not be changed).
        """
        pass

    def on_start(self, dryrun):
        """
        Hook invoked when an iteration starts.

        Override e.g. to open an output file or a log.
        :param dryrun: True when iterating in dry-run mode (files must not be changed).
        """
        pass

    def on_end(self, dryrun):
        """
        Hook invoked when an iteration ends.

        Override e.g. to close an output file or a log.
        :param dryrun: True when iterating in dry-run mode (files must not be changed).
        """
        pass

    def match_filters(self, path):
        """
        Decide whether a path passes the configured filters.

        Include/Exclude filters short-circuit: the first one to match decides
        immediately. Otherwise the path is accepted only if every Required
        filter matched.
        :param path: path to check.
        :return: True if the file passes the filters, False otherwise.
        """
        # indicates whether all Required filters matched so far
        all_required_match = True
        for filt, ftype in self.__filters:
            # "Required": a single failure is enough to reject later, but we
            # keep scanning — an Include/Exclude filter may still decide
            if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
                all_required_match = False
            # "Include": first match accepts the file immediately
            elif ftype == self.FilterType.Include and filt.match(path):
                return True
            # "Exclude": first match rejects the file immediately
            elif ftype == self.FilterType.Exclude and filt.match(path):
                return False
        # no Include/Exclude filter decided; accept iff all Required matched
        return all_required_match

    def process_file(self, path, dryrun):
        """
        Called once for every file the iteration produces.

        Subclasses may transform the path before it is yielded, or return
        None to drop the file from the iteration. The default is a no-op
        pass-through.
        :param path: current file path.
        :param dryrun: True when iterating in dry-run mode (files must not be changed).
        :return: the (possibly transformed) path, or None to omit this file.
        """
        return path
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.