repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
jupyterhub/chartpress | chartpress.py | render_build_args | def render_build_args(options, ns):
"""Get docker build args dict, rendering any templated args.
Args:
options (dict):
The dictionary for a given image from chartpress.yaml.
Fields in `options['buildArgs']` will be rendered and returned,
if defined.
ns (dict): the namespace used when rendering templated arguments
"""
build_args = options.get('buildArgs', {})
for key, value in build_args.items():
build_args[key] = value.format(**ns)
return build_args | python | def render_build_args(options, ns):
"""Get docker build args dict, rendering any templated args.
Args:
options (dict):
The dictionary for a given image from chartpress.yaml.
Fields in `options['buildArgs']` will be rendered and returned,
if defined.
ns (dict): the namespace used when rendering templated arguments
"""
build_args = options.get('buildArgs', {})
for key, value in build_args.items():
build_args[key] = value.format(**ns)
return build_args | [
"def",
"render_build_args",
"(",
"options",
",",
"ns",
")",
":",
"build_args",
"=",
"options",
".",
"get",
"(",
"'buildArgs'",
",",
"{",
"}",
")",
"for",
"key",
",",
"value",
"in",
"build_args",
".",
"items",
"(",
")",
":",
"build_args",
"[",
"key",
... | Get docker build args dict, rendering any templated args.
Args:
options (dict):
The dictionary for a given image from chartpress.yaml.
Fields in `options['buildArgs']` will be rendered and returned,
if defined.
ns (dict): the namespace used when rendering templated arguments | [
"Get",
"docker",
"build",
"args",
"dict",
"rendering",
"any",
"templated",
"args",
"."
] | 541f132f31c9f3a66750d7847fb28c7ce5a0ca6d | https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L98-L111 | train | 42,400 |
jupyterhub/chartpress | chartpress.py | build_image | def build_image(image_path, image_name, build_args=None, dockerfile_path=None):
"""Build an image
Args:
image_path (str): the path to the image directory
image_name (str): image 'name:tag' to build
build_args (dict, optional): dict of docker build arguments
dockerfile_path (str, optional):
path to dockerfile relative to image_path
if not `image_path/Dockerfile`.
"""
cmd = ['docker', 'build', '-t', image_name, image_path]
if dockerfile_path:
cmd.extend(['-f', dockerfile_path])
for k, v in (build_args or {}).items():
cmd += ['--build-arg', '{}={}'.format(k, v)]
check_call(cmd) | python | def build_image(image_path, image_name, build_args=None, dockerfile_path=None):
"""Build an image
Args:
image_path (str): the path to the image directory
image_name (str): image 'name:tag' to build
build_args (dict, optional): dict of docker build arguments
dockerfile_path (str, optional):
path to dockerfile relative to image_path
if not `image_path/Dockerfile`.
"""
cmd = ['docker', 'build', '-t', image_name, image_path]
if dockerfile_path:
cmd.extend(['-f', dockerfile_path])
for k, v in (build_args or {}).items():
cmd += ['--build-arg', '{}={}'.format(k, v)]
check_call(cmd) | [
"def",
"build_image",
"(",
"image_path",
",",
"image_name",
",",
"build_args",
"=",
"None",
",",
"dockerfile_path",
"=",
"None",
")",
":",
"cmd",
"=",
"[",
"'docker'",
",",
"'build'",
",",
"'-t'",
",",
"image_name",
",",
"image_path",
"]",
"if",
"dockerfil... | Build an image
Args:
image_path (str): the path to the image directory
image_name (str): image 'name:tag' to build
build_args (dict, optional): dict of docker build arguments
dockerfile_path (str, optional):
path to dockerfile relative to image_path
if not `image_path/Dockerfile`. | [
"Build",
"an",
"image"
] | 541f132f31c9f3a66750d7847fb28c7ce5a0ca6d | https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L114-L131 | train | 42,401 |
jupyterhub/chartpress | chartpress.py | image_needs_pushing | def image_needs_pushing(image):
"""Return whether an image needs pushing
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be pushed (not on registry)
False: if not (already present on registry)
"""
d = docker_client()
try:
d.images.get_registry_data(image)
except docker.errors.APIError:
# image not found on registry, needs pushing
return True
else:
return False | python | def image_needs_pushing(image):
"""Return whether an image needs pushing
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be pushed (not on registry)
False: if not (already present on registry)
"""
d = docker_client()
try:
d.images.get_registry_data(image)
except docker.errors.APIError:
# image not found on registry, needs pushing
return True
else:
return False | [
"def",
"image_needs_pushing",
"(",
"image",
")",
":",
"d",
"=",
"docker_client",
"(",
")",
"try",
":",
"d",
".",
"images",
".",
"get_registry_data",
"(",
"image",
")",
"except",
"docker",
".",
"errors",
".",
"APIError",
":",
"# image not found on registry, nee... | Return whether an image needs pushing
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be pushed (not on registry)
False: if not (already present on registry) | [
"Return",
"whether",
"an",
"image",
"needs",
"pushing"
] | 541f132f31c9f3a66750d7847fb28c7ce5a0ca6d | https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L140-L159 | train | 42,402 |
jupyterhub/chartpress | chartpress.py | image_needs_building | def image_needs_building(image):
"""Return whether an image needs building
Checks if the image exists (ignores commit range),
either locally or on the registry.
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be built
False: if not (image already exists)
"""
d = docker_client()
# first, check for locally built image
try:
d.images.get(image)
except docker.errors.ImageNotFound:
# image not found, check registry
pass
else:
# it exists locally, no need to check remote
return False
# image may need building if it's not on the registry
return image_needs_pushing(image) | python | def image_needs_building(image):
"""Return whether an image needs building
Checks if the image exists (ignores commit range),
either locally or on the registry.
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be built
False: if not (image already exists)
"""
d = docker_client()
# first, check for locally built image
try:
d.images.get(image)
except docker.errors.ImageNotFound:
# image not found, check registry
pass
else:
# it exists locally, no need to check remote
return False
# image may need building if it's not on the registry
return image_needs_pushing(image) | [
"def",
"image_needs_building",
"(",
"image",
")",
":",
"d",
"=",
"docker_client",
"(",
")",
"# first, check for locally built image",
"try",
":",
"d",
".",
"images",
".",
"get",
"(",
"image",
")",
"except",
"docker",
".",
"errors",
".",
"ImageNotFound",
":",
... | Return whether an image needs building
Checks if the image exists (ignores commit range),
either locally or on the registry.
Args:
image (str): the `repository:tag` image to be build.
Returns:
True: if image needs to be built
False: if not (image already exists) | [
"Return",
"whether",
"an",
"image",
"needs",
"building"
] | 541f132f31c9f3a66750d7847fb28c7ce5a0ca6d | https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L163-L191 | train | 42,403 |
jupyterhub/chartpress | chartpress.py | build_images | def build_images(prefix, images, tag=None, commit_range=None, push=False, chart_version=None):
"""Build a collection of docker images
Args:
prefix (str): the prefix to add to images
images (dict): dict of image-specs from chartpress.yml
tag (str):
Specific tag to use instead of the last modified commit.
If unspecified the tag for each image will be the hash of the last commit
to modify the image's files.
commit_range (str):
The range of commits to consider, e.g. for building in CI.
If an image hasn't changed in the given range,
it will not be rebuilt.
push (bool):
Whether to push the resulting images (default: False).
chart_version (str):
The chart version, included as a prefix on image tags
if `tag` is not specified.
"""
value_modifications = {}
for name, options in images.items():
image_path = options.get('contextPath', os.path.join('images', name))
image_tag = tag
# include chartpress.yaml itself as it can contain build args and
# similar that influence the image that would be built
paths = list(options.get('paths', [])) + [image_path, 'chartpress.yaml']
last_commit = last_modified_commit(*paths)
if tag is None:
if chart_version:
image_tag = "{}-{}".format(chart_version, last_commit)
else:
image_tag = last_commit
image_name = prefix + name
image_spec = '{}:{}'.format(image_name, image_tag)
value_modifications[options['valuesPath']] = {
'repository': image_name,
'tag': SingleQuotedScalarString(image_tag),
}
template_namespace = {
'LAST_COMMIT': last_commit,
'TAG': image_tag,
}
if tag or image_needs_building(image_spec):
build_args = render_build_args(options, template_namespace)
build_image(image_path, image_spec, build_args, options.get('dockerfilePath'))
else:
print(f"Skipping build for {image_spec}, it already exists")
if push:
if tag or image_needs_pushing(image_spec):
check_call([
'docker', 'push', image_spec
])
else:
print(f"Skipping push for {image_spec}, already on registry")
return value_modifications | python | def build_images(prefix, images, tag=None, commit_range=None, push=False, chart_version=None):
"""Build a collection of docker images
Args:
prefix (str): the prefix to add to images
images (dict): dict of image-specs from chartpress.yml
tag (str):
Specific tag to use instead of the last modified commit.
If unspecified the tag for each image will be the hash of the last commit
to modify the image's files.
commit_range (str):
The range of commits to consider, e.g. for building in CI.
If an image hasn't changed in the given range,
it will not be rebuilt.
push (bool):
Whether to push the resulting images (default: False).
chart_version (str):
The chart version, included as a prefix on image tags
if `tag` is not specified.
"""
value_modifications = {}
for name, options in images.items():
image_path = options.get('contextPath', os.path.join('images', name))
image_tag = tag
# include chartpress.yaml itself as it can contain build args and
# similar that influence the image that would be built
paths = list(options.get('paths', [])) + [image_path, 'chartpress.yaml']
last_commit = last_modified_commit(*paths)
if tag is None:
if chart_version:
image_tag = "{}-{}".format(chart_version, last_commit)
else:
image_tag = last_commit
image_name = prefix + name
image_spec = '{}:{}'.format(image_name, image_tag)
value_modifications[options['valuesPath']] = {
'repository': image_name,
'tag': SingleQuotedScalarString(image_tag),
}
template_namespace = {
'LAST_COMMIT': last_commit,
'TAG': image_tag,
}
if tag or image_needs_building(image_spec):
build_args = render_build_args(options, template_namespace)
build_image(image_path, image_spec, build_args, options.get('dockerfilePath'))
else:
print(f"Skipping build for {image_spec}, it already exists")
if push:
if tag or image_needs_pushing(image_spec):
check_call([
'docker', 'push', image_spec
])
else:
print(f"Skipping push for {image_spec}, already on registry")
return value_modifications | [
"def",
"build_images",
"(",
"prefix",
",",
"images",
",",
"tag",
"=",
"None",
",",
"commit_range",
"=",
"None",
",",
"push",
"=",
"False",
",",
"chart_version",
"=",
"None",
")",
":",
"value_modifications",
"=",
"{",
"}",
"for",
"name",
",",
"options",
... | Build a collection of docker images
Args:
prefix (str): the prefix to add to images
images (dict): dict of image-specs from chartpress.yml
tag (str):
Specific tag to use instead of the last modified commit.
If unspecified the tag for each image will be the hash of the last commit
to modify the image's files.
commit_range (str):
The range of commits to consider, e.g. for building in CI.
If an image hasn't changed in the given range,
it will not be rebuilt.
push (bool):
Whether to push the resulting images (default: False).
chart_version (str):
The chart version, included as a prefix on image tags
if `tag` is not specified. | [
"Build",
"a",
"collection",
"of",
"docker",
"images"
] | 541f132f31c9f3a66750d7847fb28c7ce5a0ca6d | https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L194-L253 | train | 42,404 |
jupyterhub/chartpress | chartpress.py | publish_pages | def publish_pages(name, paths, git_repo, published_repo, extra_message=''):
"""Publish helm chart index to github pages"""
version = last_modified_commit(*paths)
checkout_dir = '{}-{}'.format(name, version)
check_call([
'git', 'clone', '--no-checkout',
git_remote(git_repo), checkout_dir],
echo=False,
)
check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)
# package the latest version into a temporary directory
# and run helm repo index with --merge to update index.yaml
# without refreshing all of the timestamps
with TemporaryDirectory() as td:
check_call([
'helm', 'package', name,
'--destination', td + '/',
])
check_call([
'helm', 'repo', 'index', td,
'--url', published_repo,
'--merge', os.path.join(checkout_dir, 'index.yaml'),
])
# equivalent to `cp td/* checkout/`
# copies new helm chart and updated index.yaml
for f in os.listdir(td):
shutil.copy2(
os.path.join(td, f),
os.path.join(checkout_dir, f)
)
check_call(['git', 'add', '.'], cwd=checkout_dir)
if extra_message:
extra_message = '\n\n%s' % extra_message
else:
extra_message = ''
check_call([
'git',
'commit',
'-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)
], cwd=checkout_dir)
check_call(
['git', 'push', 'origin', 'gh-pages'],
cwd=checkout_dir,
) | python | def publish_pages(name, paths, git_repo, published_repo, extra_message=''):
"""Publish helm chart index to github pages"""
version = last_modified_commit(*paths)
checkout_dir = '{}-{}'.format(name, version)
check_call([
'git', 'clone', '--no-checkout',
git_remote(git_repo), checkout_dir],
echo=False,
)
check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)
# package the latest version into a temporary directory
# and run helm repo index with --merge to update index.yaml
# without refreshing all of the timestamps
with TemporaryDirectory() as td:
check_call([
'helm', 'package', name,
'--destination', td + '/',
])
check_call([
'helm', 'repo', 'index', td,
'--url', published_repo,
'--merge', os.path.join(checkout_dir, 'index.yaml'),
])
# equivalent to `cp td/* checkout/`
# copies new helm chart and updated index.yaml
for f in os.listdir(td):
shutil.copy2(
os.path.join(td, f),
os.path.join(checkout_dir, f)
)
check_call(['git', 'add', '.'], cwd=checkout_dir)
if extra_message:
extra_message = '\n\n%s' % extra_message
else:
extra_message = ''
check_call([
'git',
'commit',
'-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)
], cwd=checkout_dir)
check_call(
['git', 'push', 'origin', 'gh-pages'],
cwd=checkout_dir,
) | [
"def",
"publish_pages",
"(",
"name",
",",
"paths",
",",
"git_repo",
",",
"published_repo",
",",
"extra_message",
"=",
"''",
")",
":",
"version",
"=",
"last_modified_commit",
"(",
"*",
"paths",
")",
"checkout_dir",
"=",
"'{}-{}'",
".",
"format",
"(",
"name",
... | Publish helm chart index to github pages | [
"Publish",
"helm",
"chart",
"index",
"to",
"github",
"pages"
] | 541f132f31c9f3a66750d7847fb28c7ce5a0ca6d | https://github.com/jupyterhub/chartpress/blob/541f132f31c9f3a66750d7847fb28c7ce5a0ca6d/chartpress.py#L312-L358 | train | 42,405 |
devopshq/youtrack | youtrack/import_helper.py | add_values_to_bundle_safe | def add_values_to_bundle_safe(connection, bundle, values):
"""
Adds values to specified bundle. Checks, whether each value already contains in bundle. If yes, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values, that should be added in bundle.
Raises:
YouTrackException: if something is wrong with queries.
"""
for value in values:
try:
connection.addValueToBundle(bundle, value)
except YouTrackException as e:
if e.response.status == 409:
print("Value with name [ %s ] already exists in bundle [ %s ]" %
(utf8encode(value.name), utf8encode(bundle.name)))
else:
raise e | python | def add_values_to_bundle_safe(connection, bundle, values):
"""
Adds values to specified bundle. Checks, whether each value already contains in bundle. If yes, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values, that should be added in bundle.
Raises:
YouTrackException: if something is wrong with queries.
"""
for value in values:
try:
connection.addValueToBundle(bundle, value)
except YouTrackException as e:
if e.response.status == 409:
print("Value with name [ %s ] already exists in bundle [ %s ]" %
(utf8encode(value.name), utf8encode(bundle.name)))
else:
raise e | [
"def",
"add_values_to_bundle_safe",
"(",
"connection",
",",
"bundle",
",",
"values",
")",
":",
"for",
"value",
"in",
"values",
":",
"try",
":",
"connection",
".",
"addValueToBundle",
"(",
"bundle",
",",
"value",
")",
"except",
"YouTrackException",
"as",
"e",
... | Adds values to specified bundle. Checks, whether each value already contains in bundle. If yes, it is not added.
Args:
connection: An opened Connection instance.
bundle: Bundle instance to add values in.
values: Values, that should be added in bundle.
Raises:
YouTrackException: if something is wrong with queries. | [
"Adds",
"values",
"to",
"specified",
"bundle",
".",
"Checks",
"whether",
"each",
"value",
"already",
"contains",
"in",
"bundle",
".",
"If",
"yes",
"it",
"is",
"not",
"added",
"."
] | c4ec19aca253ae30ac8eee7976a2f330e480a73b | https://github.com/devopshq/youtrack/blob/c4ec19aca253ae30ac8eee7976a2f330e480a73b/youtrack/import_helper.py#L139-L159 | train | 42,406 |
jpadilla/django-rest-framework-yaml | rest_framework_yaml/parsers.py | YAMLParser.parse | def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as YAML and returns the resulting data.
"""
assert yaml, 'YAMLParser requires pyyaml to be installed'
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return yaml.safe_load(data)
except (ValueError, yaml.parser.ParserError) as exc:
raise ParseError('YAML parse error - %s' % six.text_type(exc)) | python | def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as YAML and returns the resulting data.
"""
assert yaml, 'YAMLParser requires pyyaml to be installed'
parser_context = parser_context or {}
encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
try:
data = stream.read().decode(encoding)
return yaml.safe_load(data)
except (ValueError, yaml.parser.ParserError) as exc:
raise ParseError('YAML parse error - %s' % six.text_type(exc)) | [
"def",
"parse",
"(",
"self",
",",
"stream",
",",
"media_type",
"=",
"None",
",",
"parser_context",
"=",
"None",
")",
":",
"assert",
"yaml",
",",
"'YAMLParser requires pyyaml to be installed'",
"parser_context",
"=",
"parser_context",
"or",
"{",
"}",
"encoding",
... | Parses the incoming bytestream as YAML and returns the resulting data. | [
"Parses",
"the",
"incoming",
"bytestream",
"as",
"YAML",
"and",
"returns",
"the",
"resulting",
"data",
"."
] | 4067e59874cdfe33fa36a25f275d3c91fffa15fe | https://github.com/jpadilla/django-rest-framework-yaml/blob/4067e59874cdfe33fa36a25f275d3c91fffa15fe/rest_framework_yaml/parsers.py#L21-L34 | train | 42,407 |
jpadilla/django-rest-framework-yaml | rest_framework_yaml/renderers.py | YAMLRenderer.render | def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders `data` into serialized YAML.
"""
assert yaml, 'YAMLRenderer requires pyyaml to be installed'
if data is None:
return ''
return yaml.dump(
data,
stream=None,
encoding=self.charset,
Dumper=self.encoder,
allow_unicode=not self.ensure_ascii,
default_flow_style=self.default_flow_style
) | python | def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders `data` into serialized YAML.
"""
assert yaml, 'YAMLRenderer requires pyyaml to be installed'
if data is None:
return ''
return yaml.dump(
data,
stream=None,
encoding=self.charset,
Dumper=self.encoder,
allow_unicode=not self.ensure_ascii,
default_flow_style=self.default_flow_style
) | [
"def",
"render",
"(",
"self",
",",
"data",
",",
"accepted_media_type",
"=",
"None",
",",
"renderer_context",
"=",
"None",
")",
":",
"assert",
"yaml",
",",
"'YAMLRenderer requires pyyaml to be installed'",
"if",
"data",
"is",
"None",
":",
"return",
"''",
"return"... | Renders `data` into serialized YAML. | [
"Renders",
"data",
"into",
"serialized",
"YAML",
"."
] | 4067e59874cdfe33fa36a25f275d3c91fffa15fe | https://github.com/jpadilla/django-rest-framework-yaml/blob/4067e59874cdfe33fa36a25f275d3c91fffa15fe/rest_framework_yaml/renderers.py#L24-L40 | train | 42,408 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel._toplevel | def _toplevel(cls):
"""Find the top level of the chain we're in.
For example, if we have:
C inheriting from B inheriting from A inheriting from ClosureModel
C._toplevel() will return A.
"""
superclasses = (
list(set(ClosureModel.__subclasses__()) &
set(cls._meta.get_parent_list()))
)
return next(iter(superclasses)) if superclasses else cls | python | def _toplevel(cls):
"""Find the top level of the chain we're in.
For example, if we have:
C inheriting from B inheriting from A inheriting from ClosureModel
C._toplevel() will return A.
"""
superclasses = (
list(set(ClosureModel.__subclasses__()) &
set(cls._meta.get_parent_list()))
)
return next(iter(superclasses)) if superclasses else cls | [
"def",
"_toplevel",
"(",
"cls",
")",
":",
"superclasses",
"=",
"(",
"list",
"(",
"set",
"(",
"ClosureModel",
".",
"__subclasses__",
"(",
")",
")",
"&",
"set",
"(",
"cls",
".",
"_meta",
".",
"get_parent_list",
"(",
")",
")",
")",
")",
"return",
"next"... | Find the top level of the chain we're in.
For example, if we have:
C inheriting from B inheriting from A inheriting from ClosureModel
C._toplevel() will return A. | [
"Find",
"the",
"top",
"level",
"of",
"the",
"chain",
"we",
"re",
"in",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L120-L131 | train | 42,409 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel.rebuildtable | def rebuildtable(cls):
"""Regenerate the entire closuretree."""
cls._closure_model.objects.all().delete()
cls._closure_model.objects.bulk_create([cls._closure_model(
parent_id=x['pk'],
child_id=x['pk'],
depth=0
) for x in cls.objects.values("pk")])
for node in cls.objects.all():
node._closure_createlink() | python | def rebuildtable(cls):
"""Regenerate the entire closuretree."""
cls._closure_model.objects.all().delete()
cls._closure_model.objects.bulk_create([cls._closure_model(
parent_id=x['pk'],
child_id=x['pk'],
depth=0
) for x in cls.objects.values("pk")])
for node in cls.objects.all():
node._closure_createlink() | [
"def",
"rebuildtable",
"(",
"cls",
")",
":",
"cls",
".",
"_closure_model",
".",
"objects",
".",
"all",
"(",
")",
".",
"delete",
"(",
")",
"cls",
".",
"_closure_model",
".",
"objects",
".",
"bulk_create",
"(",
"[",
"cls",
".",
"_closure_model",
"(",
"pa... | Regenerate the entire closuretree. | [
"Regenerate",
"the",
"entire",
"closuretree",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L134-L143 | train | 42,410 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel._closure_parent_pk | def _closure_parent_pk(self):
"""What our parent pk is in the closure tree."""
if hasattr(self, "%s_id" % self._closure_parent_attr):
return getattr(self, "%s_id" % self._closure_parent_attr)
else:
parent = getattr(self, self._closure_parent_attr)
return parent.pk if parent else None | python | def _closure_parent_pk(self):
"""What our parent pk is in the closure tree."""
if hasattr(self, "%s_id" % self._closure_parent_attr):
return getattr(self, "%s_id" % self._closure_parent_attr)
else:
parent = getattr(self, self._closure_parent_attr)
return parent.pk if parent else None | [
"def",
"_closure_parent_pk",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"%s_id\"",
"%",
"self",
".",
"_closure_parent_attr",
")",
":",
"return",
"getattr",
"(",
"self",
",",
"\"%s_id\"",
"%",
"self",
".",
"_closure_parent_attr",
")",
"else",... | What our parent pk is in the closure tree. | [
"What",
"our",
"parent",
"pk",
"is",
"in",
"the",
"closure",
"tree",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L176-L182 | train | 42,411 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel._closure_deletelink | def _closure_deletelink(self, oldparentpk):
"""Remove incorrect links from the closure tree."""
self._closure_model.objects.filter(
**{
"parent__%s__child" % self._closure_parentref(): oldparentpk,
"child__%s__parent" % self._closure_childref(): self.pk
}
).delete() | python | def _closure_deletelink(self, oldparentpk):
"""Remove incorrect links from the closure tree."""
self._closure_model.objects.filter(
**{
"parent__%s__child" % self._closure_parentref(): oldparentpk,
"child__%s__parent" % self._closure_childref(): self.pk
}
).delete() | [
"def",
"_closure_deletelink",
"(",
"self",
",",
"oldparentpk",
")",
":",
"self",
".",
"_closure_model",
".",
"objects",
".",
"filter",
"(",
"*",
"*",
"{",
"\"parent__%s__child\"",
"%",
"self",
".",
"_closure_parentref",
"(",
")",
":",
"oldparentpk",
",",
"\"... | Remove incorrect links from the closure tree. | [
"Remove",
"incorrect",
"links",
"from",
"the",
"closure",
"tree",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L184-L191 | train | 42,412 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel._closure_createlink | def _closure_createlink(self):
"""Create a link in the closure tree."""
linkparents = self._closure_model.objects.filter(
child__pk=self._closure_parent_pk
).values("parent", "depth")
linkchildren = self._closure_model.objects.filter(
parent__pk=self.pk
).values("child", "depth")
newlinks = [self._closure_model(
parent_id=p['parent'],
child_id=c['child'],
depth=p['depth']+c['depth']+1
) for p in linkparents for c in linkchildren]
self._closure_model.objects.bulk_create(newlinks) | python | def _closure_createlink(self):
"""Create a link in the closure tree."""
linkparents = self._closure_model.objects.filter(
child__pk=self._closure_parent_pk
).values("parent", "depth")
linkchildren = self._closure_model.objects.filter(
parent__pk=self.pk
).values("child", "depth")
newlinks = [self._closure_model(
parent_id=p['parent'],
child_id=c['child'],
depth=p['depth']+c['depth']+1
) for p in linkparents for c in linkchildren]
self._closure_model.objects.bulk_create(newlinks) | [
"def",
"_closure_createlink",
"(",
"self",
")",
":",
"linkparents",
"=",
"self",
".",
"_closure_model",
".",
"objects",
".",
"filter",
"(",
"child__pk",
"=",
"self",
".",
"_closure_parent_pk",
")",
".",
"values",
"(",
"\"parent\"",
",",
"\"depth\"",
")",
"li... | Create a link in the closure tree. | [
"Create",
"a",
"link",
"in",
"the",
"closure",
"tree",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L193-L206 | train | 42,413 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel.get_ancestors | def get_ancestors(self, include_self=False, depth=None):
"""Return all the ancestors of this object."""
if self.is_root_node():
if not include_self:
return self._toplevel().objects.none()
else:
# Filter on pk for efficiency.
return self._toplevel().objects.filter(pk=self.pk)
params = {"%s__child" % self._closure_parentref():self.pk}
if depth is not None:
params["%s__depth__lte" % self._closure_parentref()] = depth
ancestors = self._toplevel().objects.filter(**params)
if not include_self:
ancestors = ancestors.exclude(pk=self.pk)
return ancestors.order_by("%s__depth" % self._closure_parentref()) | python | def get_ancestors(self, include_self=False, depth=None):
"""Return all the ancestors of this object."""
if self.is_root_node():
if not include_self:
return self._toplevel().objects.none()
else:
# Filter on pk for efficiency.
return self._toplevel().objects.filter(pk=self.pk)
params = {"%s__child" % self._closure_parentref():self.pk}
if depth is not None:
params["%s__depth__lte" % self._closure_parentref()] = depth
ancestors = self._toplevel().objects.filter(**params)
if not include_self:
ancestors = ancestors.exclude(pk=self.pk)
return ancestors.order_by("%s__depth" % self._closure_parentref()) | [
"def",
"get_ancestors",
"(",
"self",
",",
"include_self",
"=",
"False",
",",
"depth",
"=",
"None",
")",
":",
"if",
"self",
".",
"is_root_node",
"(",
")",
":",
"if",
"not",
"include_self",
":",
"return",
"self",
".",
"_toplevel",
"(",
")",
".",
"objects... | Return all the ancestors of this object. | [
"Return",
"all",
"the",
"ancestors",
"of",
"this",
"object",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L208-L223 | train | 42,414 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel.get_descendants | def get_descendants(self, include_self=False, depth=None):
"""Return all the descendants of this object."""
params = {"%s__parent" % self._closure_childref():self.pk}
if depth is not None:
params["%s__depth__lte" % self._closure_childref()] = depth
descendants = self._toplevel().objects.filter(**params)
if not include_self:
descendants = descendants.exclude(pk=self.pk)
return descendants.order_by("%s__depth" % self._closure_childref()) | python | def get_descendants(self, include_self=False, depth=None):
"""Return all the descendants of this object."""
params = {"%s__parent" % self._closure_childref():self.pk}
if depth is not None:
params["%s__depth__lte" % self._closure_childref()] = depth
descendants = self._toplevel().objects.filter(**params)
if not include_self:
descendants = descendants.exclude(pk=self.pk)
return descendants.order_by("%s__depth" % self._closure_childref()) | [
"def",
"get_descendants",
"(",
"self",
",",
"include_self",
"=",
"False",
",",
"depth",
"=",
"None",
")",
":",
"params",
"=",
"{",
"\"%s__parent\"",
"%",
"self",
".",
"_closure_childref",
"(",
")",
":",
"self",
".",
"pk",
"}",
"if",
"depth",
"is",
"not... | Return all the descendants of this object. | [
"Return",
"all",
"the",
"descendants",
"of",
"this",
"object",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L225-L233 | train | 42,415 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel.get_children | def get_children(self):
"""Return all the children of this object."""
if hasattr(self, '_cached_children'):
children = self._toplevel().objects.filter(
pk__in=[n.pk for n in self._cached_children]
)
children._result_cache = self._cached_children
return children
else:
return self.get_descendants(include_self=False, depth=1) | python | def get_children(self):
"""Return all the children of this object."""
if hasattr(self, '_cached_children'):
children = self._toplevel().objects.filter(
pk__in=[n.pk for n in self._cached_children]
)
children._result_cache = self._cached_children
return children
else:
return self.get_descendants(include_self=False, depth=1) | [
"def",
"get_children",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_cached_children'",
")",
":",
"children",
"=",
"self",
".",
"_toplevel",
"(",
")",
".",
"objects",
".",
"filter",
"(",
"pk__in",
"=",
"[",
"n",
".",
"pk",
"for",
"n",
... | Return all the children of this object. | [
"Return",
"all",
"the",
"children",
"of",
"this",
"object",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L248-L257 | train | 42,416 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel.get_root | def get_root(self):
"""Return the furthest ancestor of this node."""
if self.is_root_node():
return self
return self.get_ancestors().order_by(
"-%s__depth" % self._closure_parentref()
)[0] | python | def get_root(self):
"""Return the furthest ancestor of this node."""
if self.is_root_node():
return self
return self.get_ancestors().order_by(
"-%s__depth" % self._closure_parentref()
)[0] | [
"def",
"get_root",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_root_node",
"(",
")",
":",
"return",
"self",
"return",
"self",
".",
"get_ancestors",
"(",
")",
".",
"order_by",
"(",
"\"-%s__depth\"",
"%",
"self",
".",
"_closure_parentref",
"(",
")",
")",... | Return the furthest ancestor of this node. | [
"Return",
"the",
"furthest",
"ancestor",
"of",
"this",
"node",
"."
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L259-L266 | train | 42,417 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel.is_descendant_of | def is_descendant_of(self, other, include_self=False):
"""Is this node a descendant of `other`?"""
if other.pk == self.pk:
return include_self
return self._closure_model.objects.filter(
parent=other,
child=self
).exclude(pk=self.pk).exists() | python | def is_descendant_of(self, other, include_self=False):
"""Is this node a descendant of `other`?"""
if other.pk == self.pk:
return include_self
return self._closure_model.objects.filter(
parent=other,
child=self
).exclude(pk=self.pk).exists() | [
"def",
"is_descendant_of",
"(",
"self",
",",
"other",
",",
"include_self",
"=",
"False",
")",
":",
"if",
"other",
".",
"pk",
"==",
"self",
".",
"pk",
":",
"return",
"include_self",
"return",
"self",
".",
"_closure_model",
".",
"objects",
".",
"filter",
"... | Is this node a descendant of `other`? | [
"Is",
"this",
"node",
"a",
"descendant",
"of",
"other",
"?"
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L276-L284 | train | 42,418 |
ocadotechnology/django-closuretree | closuretree/models.py | ClosureModel.is_ancestor_of | def is_ancestor_of(self, other, include_self=False):
"""Is this node an ancestor of `other`?"""
return other.is_descendant_of(self, include_self=include_self) | python | def is_ancestor_of(self, other, include_self=False):
"""Is this node an ancestor of `other`?"""
return other.is_descendant_of(self, include_self=include_self) | [
"def",
"is_ancestor_of",
"(",
"self",
",",
"other",
",",
"include_self",
"=",
"False",
")",
":",
"return",
"other",
".",
"is_descendant_of",
"(",
"self",
",",
"include_self",
"=",
"include_self",
")"
] | Is this node an ancestor of `other`? | [
"Is",
"this",
"node",
"an",
"ancestor",
"of",
"other",
"?"
] | 432717b20907f2e475a28de3605924f69b7d67b5 | https://github.com/ocadotechnology/django-closuretree/blob/432717b20907f2e475a28de3605924f69b7d67b5/closuretree/models.py#L286-L288 | train | 42,419 |
debrouwere/python-ballpark | ballpark/utils.py | quantize | def quantize(number, digits=0, q=builtins.round):
"""
Quantize to somewhere in between a magnitude.
For example:
* ceil(55.25, 1.2) => 55.26
* floor(55.25, 1.2) => 55.24
* round(55.3333, 2.5) => 55.335
* round(12.345, 1.1) == round(12.345, 2) == 12.34
"""
base, fraction = split(digits)
# quantization beyond an order of magnitude results in a variable amount
# of decimal digits depending on the lowest common multiple,
# e.g. floor(1.2341234, 1.25) = 1.225 but floor(1.2341234, 1.5) = 1.20
if fraction * 10 % 1 > 0:
digits = base + 2
else:
digits = base + 1
multiplier = 10 ** base * invert(fraction, default=1)
quantized = q(number * multiplier) / multiplier
# additional rounding step to get rid of floating point math wonkiness
return builtins.round(quantized, digits) | python | def quantize(number, digits=0, q=builtins.round):
"""
Quantize to somewhere in between a magnitude.
For example:
* ceil(55.25, 1.2) => 55.26
* floor(55.25, 1.2) => 55.24
* round(55.3333, 2.5) => 55.335
* round(12.345, 1.1) == round(12.345, 2) == 12.34
"""
base, fraction = split(digits)
# quantization beyond an order of magnitude results in a variable amount
# of decimal digits depending on the lowest common multiple,
# e.g. floor(1.2341234, 1.25) = 1.225 but floor(1.2341234, 1.5) = 1.20
if fraction * 10 % 1 > 0:
digits = base + 2
else:
digits = base + 1
multiplier = 10 ** base * invert(fraction, default=1)
quantized = q(number * multiplier) / multiplier
# additional rounding step to get rid of floating point math wonkiness
return builtins.round(quantized, digits) | [
"def",
"quantize",
"(",
"number",
",",
"digits",
"=",
"0",
",",
"q",
"=",
"builtins",
".",
"round",
")",
":",
"base",
",",
"fraction",
"=",
"split",
"(",
"digits",
")",
"# quantization beyond an order of magnitude results in a variable amount",
"# of decimal digits ... | Quantize to somewhere in between a magnitude.
For example:
* ceil(55.25, 1.2) => 55.26
* floor(55.25, 1.2) => 55.24
* round(55.3333, 2.5) => 55.335
* round(12.345, 1.1) == round(12.345, 2) == 12.34 | [
"Quantize",
"to",
"somewhere",
"in",
"between",
"a",
"magnitude",
"."
] | 0b871cdf5b4b5f50e5f3f3d044558801783381c4 | https://github.com/debrouwere/python-ballpark/blob/0b871cdf5b4b5f50e5f3f3d044558801783381c4/ballpark/utils.py#L44-L70 | train | 42,420 |
debrouwere/python-ballpark | ballpark/utils.py | vectorize | def vectorize(fn):
"""
Allows a method to accept a list argument, but internally deal only
with a single item of that list.
"""
@functools.wraps(fn)
def vectorized_function(values, *vargs, **kwargs):
return [fn(value, *vargs, **kwargs) for value in values]
return vectorized_function | python | def vectorize(fn):
"""
Allows a method to accept a list argument, but internally deal only
with a single item of that list.
"""
@functools.wraps(fn)
def vectorized_function(values, *vargs, **kwargs):
return [fn(value, *vargs, **kwargs) for value in values]
return vectorized_function | [
"def",
"vectorize",
"(",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"vectorized_function",
"(",
"values",
",",
"*",
"vargs",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"[",
"fn",
"(",
"value",
",",
"*",
"vargs",
",",
... | Allows a method to accept a list argument, but internally deal only
with a single item of that list. | [
"Allows",
"a",
"method",
"to",
"accept",
"a",
"list",
"argument",
"but",
"internally",
"deal",
"only",
"with",
"a",
"single",
"item",
"of",
"that",
"list",
"."
] | 0b871cdf5b4b5f50e5f3f3d044558801783381c4 | https://github.com/debrouwere/python-ballpark/blob/0b871cdf5b4b5f50e5f3f3d044558801783381c4/ballpark/utils.py#L100-L110 | train | 42,421 |
debrouwere/python-ballpark | ballpark/notation.py | engineering | def engineering(value, precision=3, prefix=False, prefixes=SI):
""" Convert a number to engineering notation. """
display = decimal.Context(prec=precision)
value = decimal.Decimal(value).normalize(context=display)
string = value.to_eng_string()
if prefix:
prefixes = {e(exponent): prefix for exponent, prefix in prefixes.items()}
return replace(string, prefixes)
else:
return string | python | def engineering(value, precision=3, prefix=False, prefixes=SI):
""" Convert a number to engineering notation. """
display = decimal.Context(prec=precision)
value = decimal.Decimal(value).normalize(context=display)
string = value.to_eng_string()
if prefix:
prefixes = {e(exponent): prefix for exponent, prefix in prefixes.items()}
return replace(string, prefixes)
else:
return string | [
"def",
"engineering",
"(",
"value",
",",
"precision",
"=",
"3",
",",
"prefix",
"=",
"False",
",",
"prefixes",
"=",
"SI",
")",
":",
"display",
"=",
"decimal",
".",
"Context",
"(",
"prec",
"=",
"precision",
")",
"value",
"=",
"decimal",
".",
"Decimal",
... | Convert a number to engineering notation. | [
"Convert",
"a",
"number",
"to",
"engineering",
"notation",
"."
] | 0b871cdf5b4b5f50e5f3f3d044558801783381c4 | https://github.com/debrouwere/python-ballpark/blob/0b871cdf5b4b5f50e5f3f3d044558801783381c4/ballpark/notation.py#L90-L101 | train | 42,422 |
pmclanahan/django-celery-email | djcelery_email/utils.py | chunked | def chunked(iterator, chunksize):
"""
Yields items from 'iterator' in chunks of size 'chunksize'.
>>> list(chunked([1, 2, 3, 4, 5], chunksize=2))
[(1, 2), (3, 4), (5,)]
"""
chunk = []
for idx, item in enumerate(iterator, 1):
chunk.append(item)
if idx % chunksize == 0:
yield chunk
chunk = []
if chunk:
yield chunk | python | def chunked(iterator, chunksize):
"""
Yields items from 'iterator' in chunks of size 'chunksize'.
>>> list(chunked([1, 2, 3, 4, 5], chunksize=2))
[(1, 2), (3, 4), (5,)]
"""
chunk = []
for idx, item in enumerate(iterator, 1):
chunk.append(item)
if idx % chunksize == 0:
yield chunk
chunk = []
if chunk:
yield chunk | [
"def",
"chunked",
"(",
"iterator",
",",
"chunksize",
")",
":",
"chunk",
"=",
"[",
"]",
"for",
"idx",
",",
"item",
"in",
"enumerate",
"(",
"iterator",
",",
"1",
")",
":",
"chunk",
".",
"append",
"(",
"item",
")",
"if",
"idx",
"%",
"chunksize",
"==",... | Yields items from 'iterator' in chunks of size 'chunksize'.
>>> list(chunked([1, 2, 3, 4, 5], chunksize=2))
[(1, 2), (3, 4), (5,)] | [
"Yields",
"items",
"from",
"iterator",
"in",
"chunks",
"of",
"size",
"chunksize",
"."
] | 6d0684b3d2d6751c4e5066f9215e130e6a91ea78 | https://github.com/pmclanahan/django-celery-email/blob/6d0684b3d2d6751c4e5066f9215e130e6a91ea78/djcelery_email/utils.py#L10-L24 | train | 42,423 |
ClearcodeHQ/mirakuru | src/mirakuru/tcp.py | TCPExecutor.pre_start_check | def pre_start_check(self):
"""
Check if process accepts connections.
.. note::
Process will be considered started, when it'll be able to accept
TCP connections as defined in initializer.
"""
try:
sock = socket.socket()
sock.connect((self.host, self.port))
return True
except (socket.error, socket.timeout):
return False
finally:
# close socket manually for sake of PyPy
sock.close() | python | def pre_start_check(self):
"""
Check if process accepts connections.
.. note::
Process will be considered started, when it'll be able to accept
TCP connections as defined in initializer.
"""
try:
sock = socket.socket()
sock.connect((self.host, self.port))
return True
except (socket.error, socket.timeout):
return False
finally:
# close socket manually for sake of PyPy
sock.close() | [
"def",
"pre_start_check",
"(",
"self",
")",
":",
"try",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
")",
"sock",
".",
"connect",
"(",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
")",
"return",
"True",
"except",
"(",
"socket",
".",
... | Check if process accepts connections.
.. note::
Process will be considered started, when it'll be able to accept
TCP connections as defined in initializer. | [
"Check",
"if",
"process",
"accepts",
"connections",
"."
] | 38203f328479ac9356d468a20daa743807194698 | https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/tcp.py#L55-L72 | train | 42,424 |
ClearcodeHQ/mirakuru | src/mirakuru/http.py | HTTPExecutor.after_start_check | def after_start_check(self):
"""Check if defined URL returns expected status to a HEAD request."""
try:
conn = HTTPConnection(self.host, self.port)
conn.request('HEAD', self.url.path)
status = str(conn.getresponse().status)
if status == self.status or self.status_re.match(status):
conn.close()
return True
except (HTTPException, socket.timeout, socket.error):
return False | python | def after_start_check(self):
"""Check if defined URL returns expected status to a HEAD request."""
try:
conn = HTTPConnection(self.host, self.port)
conn.request('HEAD', self.url.path)
status = str(conn.getresponse().status)
if status == self.status or self.status_re.match(status):
conn.close()
return True
except (HTTPException, socket.timeout, socket.error):
return False | [
"def",
"after_start_check",
"(",
"self",
")",
":",
"try",
":",
"conn",
"=",
"HTTPConnection",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"conn",
".",
"request",
"(",
"'HEAD'",
",",
"self",
".",
"url",
".",
"path",
")",
"status",
"=",
... | Check if defined URL returns expected status to a HEAD request. | [
"Check",
"if",
"defined",
"URL",
"returns",
"expected",
"status",
"to",
"a",
"HEAD",
"request",
"."
] | 38203f328479ac9356d468a20daa743807194698 | https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/http.py#L75-L88 | train | 42,425 |
ClearcodeHQ/mirakuru | src/mirakuru/output.py | OutputExecutor._wait_for_output | def _wait_for_output(self):
"""
Check if output matches banner.
.. warning::
Waiting for I/O completion. It does not work on Windows. Sorry.
"""
# Here we should get an empty list or list with a tuple [(fd, event)]
# When we get list with a tuple we can use readline method on
# the file descriptor.
poll_result = self.poll_obj.poll(0)
if poll_result:
line = self.output().readline()
if self._banner.match(line):
return True
return False | python | def _wait_for_output(self):
"""
Check if output matches banner.
.. warning::
Waiting for I/O completion. It does not work on Windows. Sorry.
"""
# Here we should get an empty list or list with a tuple [(fd, event)]
# When we get list with a tuple we can use readline method on
# the file descriptor.
poll_result = self.poll_obj.poll(0)
if poll_result:
line = self.output().readline()
if self._banner.match(line):
return True
return False | [
"def",
"_wait_for_output",
"(",
"self",
")",
":",
"# Here we should get an empty list or list with a tuple [(fd, event)]",
"# When we get list with a tuple we can use readline method on",
"# the file descriptor.",
"poll_result",
"=",
"self",
".",
"poll_obj",
".",
"poll",
"(",
"0",
... | Check if output matches banner.
.. warning::
Waiting for I/O completion. It does not work on Windows. Sorry. | [
"Check",
"if",
"output",
"matches",
"banner",
"."
] | 38203f328479ac9356d468a20daa743807194698 | https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/output.py#L80-L97 | train | 42,426 |
ClearcodeHQ/mirakuru | src/mirakuru/base.py | SimpleExecutor.start | def start(self):
"""
Start defined process.
After process gets started, timeout countdown begins as well.
:returns: itself
:rtype: SimpleExecutor
.. note::
We want to open ``stdin``, ``stdout`` and ``stderr`` as text
streams in universal newlines mode, so we have to set
``universal_newlines`` to ``True``.
"""
if self.process is None:
command = self.command
if not self._shell:
command = self.command_parts
env = os.environ.copy()
# Trick with marking subprocesses with an environment variable.
#
# There is no easy way to recognize all subprocesses that were
# spawned during lifetime of a certain subprocess so mirakuru does
# this hack in order to mark who was the original parent. Even if
# some subprocess got daemonized or changed original process group
# mirakuru will be able to find it by this environment variable.
#
# There may be a situation when some subprocess will abandon
# original envs from parents and then it won't be later found.
env[ENV_UUID] = self._uuid
popen_kwargs = {
'shell': self._shell,
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'universal_newlines': True,
'env': env,
}
if platform.system() != 'Windows':
popen_kwargs['preexec_fn'] = os.setsid
self.process = subprocess.Popen(
command,
**popen_kwargs
)
self._set_timeout()
return self | python | def start(self):
"""
Start defined process.
After process gets started, timeout countdown begins as well.
:returns: itself
:rtype: SimpleExecutor
.. note::
We want to open ``stdin``, ``stdout`` and ``stderr`` as text
streams in universal newlines mode, so we have to set
``universal_newlines`` to ``True``.
"""
if self.process is None:
command = self.command
if not self._shell:
command = self.command_parts
env = os.environ.copy()
# Trick with marking subprocesses with an environment variable.
#
# There is no easy way to recognize all subprocesses that were
# spawned during lifetime of a certain subprocess so mirakuru does
# this hack in order to mark who was the original parent. Even if
# some subprocess got daemonized or changed original process group
# mirakuru will be able to find it by this environment variable.
#
# There may be a situation when some subprocess will abandon
# original envs from parents and then it won't be later found.
env[ENV_UUID] = self._uuid
popen_kwargs = {
'shell': self._shell,
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'universal_newlines': True,
'env': env,
}
if platform.system() != 'Windows':
popen_kwargs['preexec_fn'] = os.setsid
self.process = subprocess.Popen(
command,
**popen_kwargs
)
self._set_timeout()
return self | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"process",
"is",
"None",
":",
"command",
"=",
"self",
".",
"command",
"if",
"not",
"self",
".",
"_shell",
":",
"command",
"=",
"self",
".",
"command_parts",
"env",
"=",
"os",
".",
"environ",
... | Start defined process.
After process gets started, timeout countdown begins as well.
:returns: itself
:rtype: SimpleExecutor
.. note::
We want to open ``stdin``, ``stdout`` and ``stderr`` as text
streams in universal newlines mode, so we have to set
``universal_newlines`` to ``True``. | [
"Start",
"defined",
"process",
"."
] | 38203f328479ac9356d468a20daa743807194698 | https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/base.py#L153-L199 | train | 42,427 |
ClearcodeHQ/mirakuru | src/mirakuru/base.py | SimpleExecutor.kill | def kill(self, wait=True, sig=None):
"""
Kill the process if running.
:param bool wait: set to `True` to wait for the process to end,
or False, to simply proceed after sending signal.
:param int sig: signal used to kill process run by the executor.
None by default.
:returns: itself
:rtype: SimpleExecutor
"""
if sig is None:
sig = self._sig_kill
if self.running():
os.killpg(self.process.pid, sig)
if wait:
self.process.wait()
self._kill_all_kids(sig)
self._clear_process()
return self | python | def kill(self, wait=True, sig=None):
"""
Kill the process if running.
:param bool wait: set to `True` to wait for the process to end,
or False, to simply proceed after sending signal.
:param int sig: signal used to kill process run by the executor.
None by default.
:returns: itself
:rtype: SimpleExecutor
"""
if sig is None:
sig = self._sig_kill
if self.running():
os.killpg(self.process.pid, sig)
if wait:
self.process.wait()
self._kill_all_kids(sig)
self._clear_process()
return self | [
"def",
"kill",
"(",
"self",
",",
"wait",
"=",
"True",
",",
"sig",
"=",
"None",
")",
":",
"if",
"sig",
"is",
"None",
":",
"sig",
"=",
"self",
".",
"_sig_kill",
"if",
"self",
".",
"running",
"(",
")",
":",
"os",
".",
"killpg",
"(",
"self",
".",
... | Kill the process if running.
:param bool wait: set to `True` to wait for the process to end,
or False, to simply proceed after sending signal.
:param int sig: signal used to kill process run by the executor.
None by default.
:returns: itself
:rtype: SimpleExecutor | [
"Kill",
"the",
"process",
"if",
"running",
"."
] | 38203f328479ac9356d468a20daa743807194698 | https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/base.py#L308-L328 | train | 42,428 |
ClearcodeHQ/mirakuru | src/mirakuru/base.py | SimpleExecutor.wait_for | def wait_for(self, wait_for):
"""
Wait for callback to return True.
Simply returns if wait_for condition has been met,
raises TimeoutExpired otherwise and kills the process.
:param callback wait_for: callback to call
:raises: mirakuru.exceptions.TimeoutExpired
:returns: itself
:rtype: SimpleExecutor
"""
while self.check_timeout():
if wait_for():
return self
time.sleep(self._sleep)
self.kill()
raise TimeoutExpired(self, timeout=self._timeout) | python | def wait_for(self, wait_for):
"""
Wait for callback to return True.
Simply returns if wait_for condition has been met,
raises TimeoutExpired otherwise and kills the process.
:param callback wait_for: callback to call
:raises: mirakuru.exceptions.TimeoutExpired
:returns: itself
:rtype: SimpleExecutor
"""
while self.check_timeout():
if wait_for():
return self
time.sleep(self._sleep)
self.kill()
raise TimeoutExpired(self, timeout=self._timeout) | [
"def",
"wait_for",
"(",
"self",
",",
"wait_for",
")",
":",
"while",
"self",
".",
"check_timeout",
"(",
")",
":",
"if",
"wait_for",
"(",
")",
":",
"return",
"self",
"time",
".",
"sleep",
"(",
"self",
".",
"_sleep",
")",
"self",
".",
"kill",
"(",
")"... | Wait for callback to return True.
Simply returns if wait_for condition has been met,
raises TimeoutExpired otherwise and kills the process.
:param callback wait_for: callback to call
:raises: mirakuru.exceptions.TimeoutExpired
:returns: itself
:rtype: SimpleExecutor | [
"Wait",
"for",
"callback",
"to",
"return",
"True",
"."
] | 38203f328479ac9356d468a20daa743807194698 | https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/base.py#L335-L353 | train | 42,429 |
ClearcodeHQ/mirakuru | src/mirakuru/base.py | Executor.start | def start(self):
"""
Start executor with additional checks.
Checks if previous executor isn't running then start process
(executor) and wait until it's started.
:returns: itself
:rtype: Executor
"""
if self.pre_start_check():
# Some other executor (or process) is running with same config:
raise AlreadyRunning(self)
super(Executor, self).start()
self.wait_for(self.check_subprocess)
return self | python | def start(self):
"""
Start executor with additional checks.
Checks if previous executor isn't running then start process
(executor) and wait until it's started.
:returns: itself
:rtype: Executor
"""
if self.pre_start_check():
# Some other executor (or process) is running with same config:
raise AlreadyRunning(self)
super(Executor, self).start()
self.wait_for(self.check_subprocess)
return self | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"pre_start_check",
"(",
")",
":",
"# Some other executor (or process) is running with same config:",
"raise",
"AlreadyRunning",
"(",
"self",
")",
"super",
"(",
"Executor",
",",
"self",
")",
".",
"start",
... | Start executor with additional checks.
Checks if previous executor isn't running then start process
(executor) and wait until it's started.
:returns: itself
:rtype: Executor | [
"Start",
"executor",
"with",
"additional",
"checks",
"."
] | 38203f328479ac9356d468a20daa743807194698 | https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/base.py#L416-L432 | train | 42,430 |
ClearcodeHQ/mirakuru | src/mirakuru/base.py | Executor.check_subprocess | def check_subprocess(self):
"""
Make sure the process didn't exit with an error and run the checks.
:rtype: bool
:return: the actual check status
:raise ProcessExitedWithError: when the main process exits with
an error
"""
exit_code = self.process.poll()
if exit_code is not None and exit_code != 0:
# The main process exited with an error. Clean up the children
# if any.
self._kill_all_kids(self._sig_kill)
self._clear_process()
raise ProcessExitedWithError(self, exit_code)
return self.after_start_check() | python | def check_subprocess(self):
"""
Make sure the process didn't exit with an error and run the checks.
:rtype: bool
:return: the actual check status
:raise ProcessExitedWithError: when the main process exits with
an error
"""
exit_code = self.process.poll()
if exit_code is not None and exit_code != 0:
# The main process exited with an error. Clean up the children
# if any.
self._kill_all_kids(self._sig_kill)
self._clear_process()
raise ProcessExitedWithError(self, exit_code)
return self.after_start_check() | [
"def",
"check_subprocess",
"(",
"self",
")",
":",
"exit_code",
"=",
"self",
".",
"process",
".",
"poll",
"(",
")",
"if",
"exit_code",
"is",
"not",
"None",
"and",
"exit_code",
"!=",
"0",
":",
"# The main process exited with an error. Clean up the children",
"# if a... | Make sure the process didn't exit with an error and run the checks.
:rtype: bool
:return: the actual check status
:raise ProcessExitedWithError: when the main process exits with
an error | [
"Make",
"sure",
"the",
"process",
"didn",
"t",
"exit",
"with",
"an",
"error",
"and",
"run",
"the",
"checks",
"."
] | 38203f328479ac9356d468a20daa743807194698 | https://github.com/ClearcodeHQ/mirakuru/blob/38203f328479ac9356d468a20daa743807194698/src/mirakuru/base.py#L434-L451 | train | 42,431 |
Mic92/kshape | kshape/core.py | _ncc_c_2dim | def _ncc_c_2dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 1 dimensional
y vector
Returns a 2 dimensional array of normalized fourier transforms
"""
den = np.array(norm(x, axis=1) * norm(y))
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
cc = np.concatenate((cc[:,-(x_len-1):], cc[:,:x_len]), axis=1)
return np.real(cc) / den[:, np.newaxis] | python | def _ncc_c_2dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 1 dimensional
y vector
Returns a 2 dimensional array of normalized fourier transforms
"""
den = np.array(norm(x, axis=1) * norm(y))
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
cc = np.concatenate((cc[:,-(x_len-1):], cc[:,:x_len]), axis=1)
return np.real(cc) / den[:, np.newaxis] | [
"def",
"_ncc_c_2dim",
"(",
"x",
",",
"y",
")",
":",
"den",
"=",
"np",
".",
"array",
"(",
"norm",
"(",
"x",
",",
"axis",
"=",
"1",
")",
"*",
"norm",
"(",
"y",
")",
")",
"den",
"[",
"den",
"==",
"0",
"]",
"=",
"np",
".",
"Inf",
"x_len",
"="... | Variant of NCCc that operates with 2 dimensional X arrays and 1 dimensional
y vector
Returns a 2 dimensional array of normalized fourier transforms | [
"Variant",
"of",
"NCCc",
"that",
"operates",
"with",
"2",
"dimensional",
"X",
"arrays",
"and",
"1",
"dimensional",
"y",
"vector"
] | d9e8ec0bae9293f7b25550c1d2d39f2a22dabe4b | https://github.com/Mic92/kshape/blob/d9e8ec0bae9293f7b25550c1d2d39f2a22dabe4b/kshape/core.py#L66-L79 | train | 42,432 |
Mic92/kshape | kshape/core.py | _ncc_c_3dim | def _ncc_c_3dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional
y vector
Returns a 3 dimensional array of normalized fourier transforms
"""
den = norm(x, axis=1)[:, None] * norm(y, axis=1)
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size))[:, None])
cc = np.concatenate((cc[:,:,-(x_len-1):], cc[:,:,:x_len]), axis=2)
return np.real(cc) / den.T[:, :, None] | python | def _ncc_c_3dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional
y vector
Returns a 3 dimensional array of normalized fourier transforms
"""
den = norm(x, axis=1)[:, None] * norm(y, axis=1)
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size))[:, None])
cc = np.concatenate((cc[:,:,-(x_len-1):], cc[:,:,:x_len]), axis=2)
return np.real(cc) / den.T[:, :, None] | [
"def",
"_ncc_c_3dim",
"(",
"x",
",",
"y",
")",
":",
"den",
"=",
"norm",
"(",
"x",
",",
"axis",
"=",
"1",
")",
"[",
":",
",",
"None",
"]",
"*",
"norm",
"(",
"y",
",",
"axis",
"=",
"1",
")",
"den",
"[",
"den",
"==",
"0",
"]",
"=",
"np",
"... | Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional
y vector
Returns a 3 dimensional array of normalized fourier transforms | [
"Variant",
"of",
"NCCc",
"that",
"operates",
"with",
"2",
"dimensional",
"X",
"arrays",
"and",
"2",
"dimensional",
"y",
"vector"
] | d9e8ec0bae9293f7b25550c1d2d39f2a22dabe4b | https://github.com/Mic92/kshape/blob/d9e8ec0bae9293f7b25550c1d2d39f2a22dabe4b/kshape/core.py#L82-L95 | train | 42,433 |
DinoTools/python-flextls | flextls/helper.py | get_version_by_version_id | def get_version_by_version_id(version_id):
"""
Get the internal version ID be the version.
:param Tuple version_id: Major and minor version number
:return: Internal version ID
:rtype: Integer|None
"""
for ver in registry.version_info:
if ver.version_id == version_id:
return ver.id
return None | python | def get_version_by_version_id(version_id):
"""
Get the internal version ID be the version.
:param Tuple version_id: Major and minor version number
:return: Internal version ID
:rtype: Integer|None
"""
for ver in registry.version_info:
if ver.version_id == version_id:
return ver.id
return None | [
"def",
"get_version_by_version_id",
"(",
"version_id",
")",
":",
"for",
"ver",
"in",
"registry",
".",
"version_info",
":",
"if",
"ver",
".",
"version_id",
"==",
"version_id",
":",
"return",
"ver",
".",
"id",
"return",
"None"
] | Get the internal version ID be the version.
:param Tuple version_id: Major and minor version number
:return: Internal version ID
:rtype: Integer|None | [
"Get",
"the",
"internal",
"version",
"ID",
"be",
"the",
"version",
"."
] | c73448f20e79b1969adcc2271b91d8edda517857 | https://github.com/DinoTools/python-flextls/blob/c73448f20e79b1969adcc2271b91d8edda517857/flextls/helper.py#L4-L16 | train | 42,434 |
DinoTools/python-flextls | flextls/helper.py | get_version_name | def get_version_name(version_id):
"""
Get the name of a protocol version by the internal version ID.
:param Integer version_id: Internal protocol version ID
:return: Name of the version
:rtype: String
"""
ver = registry.version_info.get(version_id)
if ver:
return ver.name
return 'unknown' | python | def get_version_name(version_id):
"""
Get the name of a protocol version by the internal version ID.
:param Integer version_id: Internal protocol version ID
:return: Name of the version
:rtype: String
"""
ver = registry.version_info.get(version_id)
if ver:
return ver.name
return 'unknown' | [
"def",
"get_version_name",
"(",
"version_id",
")",
":",
"ver",
"=",
"registry",
".",
"version_info",
".",
"get",
"(",
"version_id",
")",
"if",
"ver",
":",
"return",
"ver",
".",
"name",
"return",
"'unknown'"
] | Get the name of a protocol version by the internal version ID.
:param Integer version_id: Internal protocol version ID
:return: Name of the version
:rtype: String | [
"Get",
"the",
"name",
"of",
"a",
"protocol",
"version",
"by",
"the",
"internal",
"version",
"ID",
"."
] | c73448f20e79b1969adcc2271b91d8edda517857 | https://github.com/DinoTools/python-flextls/blob/c73448f20e79b1969adcc2271b91d8edda517857/flextls/helper.py#L19-L31 | train | 42,435 |
DinoTools/python-flextls | flextls/helper.py | get_version_id | def get_version_id(protocol_version):
"""
Get a tuple with major and minor version number
:param Integer protocol_version: Internal version ID
:return: Tuple of major and minor protocol version
:rtype: Tuple
"""
ver = registry.version_info.get(protocol_version)
if ver:
return ver.version_id | python | def get_version_id(protocol_version):
"""
Get a tuple with major and minor version number
:param Integer protocol_version: Internal version ID
:return: Tuple of major and minor protocol version
:rtype: Tuple
"""
ver = registry.version_info.get(protocol_version)
if ver:
return ver.version_id | [
"def",
"get_version_id",
"(",
"protocol_version",
")",
":",
"ver",
"=",
"registry",
".",
"version_info",
".",
"get",
"(",
"protocol_version",
")",
"if",
"ver",
":",
"return",
"ver",
".",
"version_id"
] | Get a tuple with major and minor version number
:param Integer protocol_version: Internal version ID
:return: Tuple of major and minor protocol version
:rtype: Tuple | [
"Get",
"a",
"tuple",
"with",
"major",
"and",
"minor",
"version",
"number"
] | c73448f20e79b1969adcc2271b91d8edda517857 | https://github.com/DinoTools/python-flextls/blob/c73448f20e79b1969adcc2271b91d8edda517857/flextls/helper.py#L34-L44 | train | 42,436 |
Iotic-Labs/py-lz4framed | lz4framed/__init__.py | Compressor.end | def end(self):
"""Finalise lz4 frame, outputting any remaining as return from this function or by writing to fp)"""
with self.__lock:
if self.__write:
self.__write(compress_end(self.__ctx))
else:
return compress_end(self.__ctx) | python | def end(self):
"""Finalise lz4 frame, outputting any remaining as return from this function or by writing to fp)"""
with self.__lock:
if self.__write:
self.__write(compress_end(self.__ctx))
else:
return compress_end(self.__ctx) | [
"def",
"end",
"(",
"self",
")",
":",
"with",
"self",
".",
"__lock",
":",
"if",
"self",
".",
"__write",
":",
"self",
".",
"__write",
"(",
"compress_end",
"(",
"self",
".",
"__ctx",
")",
")",
"else",
":",
"return",
"compress_end",
"(",
"self",
".",
"... | Finalise lz4 frame, outputting any remaining as return from this function or by writing to fp) | [
"Finalise",
"lz4",
"frame",
"outputting",
"any",
"remaining",
"as",
"return",
"from",
"this",
"function",
"or",
"by",
"writing",
"to",
"fp",
")"
] | e91a89df7b656d8b1d8092e12c0697cb1fd8597c | https://github.com/Iotic-Labs/py-lz4framed/blob/e91a89df7b656d8b1d8092e12c0697cb1fd8597c/lz4framed/__init__.py#L136-L142 | train | 42,437 |
DinoTools/python-flextls | flextls/field.py | EnumField.get_value_name | def get_value_name(self, pretty=False):
"""
Get the name of the value
:param Boolean pretty: Return the name in a pretty format
:return: The name
:rtype: String
"""
if pretty:
return "%s (%x)" % (
self.enums.get(self._value, "n/a"),
self._value
)
return self.enums.get(self._value, "n/a") | python | def get_value_name(self, pretty=False):
"""
Get the name of the value
:param Boolean pretty: Return the name in a pretty format
:return: The name
:rtype: String
"""
if pretty:
return "%s (%x)" % (
self.enums.get(self._value, "n/a"),
self._value
)
return self.enums.get(self._value, "n/a") | [
"def",
"get_value_name",
"(",
"self",
",",
"pretty",
"=",
"False",
")",
":",
"if",
"pretty",
":",
"return",
"\"%s (%x)\"",
"%",
"(",
"self",
".",
"enums",
".",
"get",
"(",
"self",
".",
"_value",
",",
"\"n/a\"",
")",
",",
"self",
".",
"_value",
")",
... | Get the name of the value
:param Boolean pretty: Return the name in a pretty format
:return: The name
:rtype: String | [
"Get",
"the",
"name",
"of",
"the",
"value"
] | c73448f20e79b1969adcc2271b91d8edda517857 | https://github.com/DinoTools/python-flextls/blob/c73448f20e79b1969adcc2271b91d8edda517857/flextls/field.py#L157-L171 | train | 42,438 |
DinoTools/python-flextls | flextls/field.py | EnumField.set_value | def set_value(self, value, force=False):
"""
Set the value.
:param String|Integer value: The value to set. Must be in the enum list.
:param Boolean force: Set the value without checking it
:raises ValueError: If value name given but it isn't available
:raises TypeError: If value is not String or Integer
"""
if force:
self._value = value
return
if value is None:
self._value = value
return
if isinstance(value, six.integer_types):
self._value = value
return
if isinstance(value, six.string_types):
for v, n in self.enums.items():
if n == value:
self._value = v
return
raise ValueError("Unable to find value name in enum list")
raise TypeError(
"Value for '%s' must by of type String or Integer not '%s'" % (
self.name,
type(value)
)
) | python | def set_value(self, value, force=False):
"""
Set the value.
:param String|Integer value: The value to set. Must be in the enum list.
:param Boolean force: Set the value without checking it
:raises ValueError: If value name given but it isn't available
:raises TypeError: If value is not String or Integer
"""
if force:
self._value = value
return
if value is None:
self._value = value
return
if isinstance(value, six.integer_types):
self._value = value
return
if isinstance(value, six.string_types):
for v, n in self.enums.items():
if n == value:
self._value = v
return
raise ValueError("Unable to find value name in enum list")
raise TypeError(
"Value for '%s' must by of type String or Integer not '%s'" % (
self.name,
type(value)
)
) | [
"def",
"set_value",
"(",
"self",
",",
"value",
",",
"force",
"=",
"False",
")",
":",
"if",
"force",
":",
"self",
".",
"_value",
"=",
"value",
"return",
"if",
"value",
"is",
"None",
":",
"self",
".",
"_value",
"=",
"value",
"return",
"if",
"isinstance... | Set the value.
:param String|Integer value: The value to set. Must be in the enum list.
:param Boolean force: Set the value without checking it
:raises ValueError: If value name given but it isn't available
:raises TypeError: If value is not String or Integer | [
"Set",
"the",
"value",
"."
] | c73448f20e79b1969adcc2271b91d8edda517857 | https://github.com/DinoTools/python-flextls/blob/c73448f20e79b1969adcc2271b91d8edda517857/flextls/field.py#L173-L207 | train | 42,439 |
DinoTools/python-flextls | flextls/field.py | ECParametersField.dissect | def dissect(self, data):
"""
Dissect the field.
:param bytes data: The data to extract the field value from
:return: The rest of the data not used to dissect the field value
:rtype: bytes
"""
size = struct.calcsize("B")
if len(data) < size:
raise NotEnoughData(
"Not enough data to decode field '%s' value" % self.name
)
curve_type = struct.unpack("B", data[:size])[0]
if curve_type == 0x03:
self._value = ECParametersNamedCurveField("none")
data = self._value.dissect(data)
else:
raise NotImplementedError(
"Decoding of KeyExchange message for curve 0x%.2X not implemented" % curve_type
)
return data | python | def dissect(self, data):
"""
Dissect the field.
:param bytes data: The data to extract the field value from
:return: The rest of the data not used to dissect the field value
:rtype: bytes
"""
size = struct.calcsize("B")
if len(data) < size:
raise NotEnoughData(
"Not enough data to decode field '%s' value" % self.name
)
curve_type = struct.unpack("B", data[:size])[0]
if curve_type == 0x03:
self._value = ECParametersNamedCurveField("none")
data = self._value.dissect(data)
else:
raise NotImplementedError(
"Decoding of KeyExchange message for curve 0x%.2X not implemented" % curve_type
)
return data | [
"def",
"dissect",
"(",
"self",
",",
"data",
")",
":",
"size",
"=",
"struct",
".",
"calcsize",
"(",
"\"B\"",
")",
"if",
"len",
"(",
"data",
")",
"<",
"size",
":",
"raise",
"NotEnoughData",
"(",
"\"Not enough data to decode field '%s' value\"",
"%",
"self",
... | Dissect the field.
:param bytes data: The data to extract the field value from
:return: The rest of the data not used to dissect the field value
:rtype: bytes | [
"Dissect",
"the",
"field",
"."
] | c73448f20e79b1969adcc2271b91d8edda517857 | https://github.com/DinoTools/python-flextls/blob/c73448f20e79b1969adcc2271b91d8edda517857/flextls/field.py#L771-L794 | train | 42,440 |
noisyboiler/wampy | wampy/session.py | Session._register_procedure | def _register_procedure(self, procedure_name, invocation_policy="single"):
""" Register a "procedure" on a Client as callable over the Router.
"""
options = {"invoke": invocation_policy}
message = Register(procedure=procedure_name, options=options)
request_id = message.request_id
try:
self.send_message(message)
except ValueError:
raise WampProtocolError(
"failed to register callee: %s", procedure_name
)
self.request_ids[request_id] = procedure_name | python | def _register_procedure(self, procedure_name, invocation_policy="single"):
""" Register a "procedure" on a Client as callable over the Router.
"""
options = {"invoke": invocation_policy}
message = Register(procedure=procedure_name, options=options)
request_id = message.request_id
try:
self.send_message(message)
except ValueError:
raise WampProtocolError(
"failed to register callee: %s", procedure_name
)
self.request_ids[request_id] = procedure_name | [
"def",
"_register_procedure",
"(",
"self",
",",
"procedure_name",
",",
"invocation_policy",
"=",
"\"single\"",
")",
":",
"options",
"=",
"{",
"\"invoke\"",
":",
"invocation_policy",
"}",
"message",
"=",
"Register",
"(",
"procedure",
"=",
"procedure_name",
",",
"... | Register a "procedure" on a Client as callable over the Router. | [
"Register",
"a",
"procedure",
"on",
"a",
"Client",
"as",
"callable",
"over",
"the",
"Router",
"."
] | 7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf | https://github.com/noisyboiler/wampy/blob/7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf/wampy/session.py#L237-L251 | train | 42,441 |
noisyboiler/wampy | wampy/peers/routers.py | Crossbar.start | def start(self):
""" Start Crossbar.io in a subprocess.
"""
if self.started is True:
raise WampyError("Router already started")
# will attempt to connect or start up the CrossBar
crossbar_config_path = self.config_path
cbdir = self.crossbar_directory
# starts the process from the root of the test namespace
cmd = [
'crossbar', 'start',
'--cbdir', cbdir,
'--config', crossbar_config_path,
]
self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
self._wait_until_ready()
logger.info(
"Crosbar.io is ready for connections on %s (IPV%s)",
self.url, self.ipv
)
self.started = True | python | def start(self):
""" Start Crossbar.io in a subprocess.
"""
if self.started is True:
raise WampyError("Router already started")
# will attempt to connect or start up the CrossBar
crossbar_config_path = self.config_path
cbdir = self.crossbar_directory
# starts the process from the root of the test namespace
cmd = [
'crossbar', 'start',
'--cbdir', cbdir,
'--config', crossbar_config_path,
]
self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
self._wait_until_ready()
logger.info(
"Crosbar.io is ready for connections on %s (IPV%s)",
self.url, self.ipv
)
self.started = True | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"started",
"is",
"True",
":",
"raise",
"WampyError",
"(",
"\"Router already started\"",
")",
"# will attempt to connect or start up the CrossBar",
"crossbar_config_path",
"=",
"self",
".",
"config_path",
"cbdir... | Start Crossbar.io in a subprocess. | [
"Start",
"Crossbar",
".",
"io",
"in",
"a",
"subprocess",
"."
] | 7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf | https://github.com/noisyboiler/wampy/blob/7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf/wampy/peers/routers.py#L112-L137 | train | 42,442 |
noisyboiler/wampy | wampy/transports/websocket/connection.py | WebSocket._get_handshake_headers | def _get_handshake_headers(self, upgrade):
""" Do an HTTP upgrade handshake with the server.
Websockets upgrade from HTTP rather than TCP largely because it was
assumed that servers which provide websockets will always be talking to
a browser. Maybe a reasonable assumption once upon a time...
The headers here will go a little further and also agree the
WAMP websocket JSON subprotocols.
"""
headers = []
# https://tools.ietf.org/html/rfc6455
headers.append("GET {} HTTP/1.1".format(self.websocket_location))
headers.append("Host: {}:{}".format(self.host, self.port))
headers.append("Upgrade: websocket")
headers.append("Connection: Upgrade")
# Sec-WebSocket-Key header containing base64-encoded random bytes,
# and the server replies with a hash of the key in the
# Sec-WebSocket-Accept header. This is intended to prevent a caching
# proxy from re-sending a previous WebSocket conversation and does not
# provide any authentication, privacy or integrity
headers.append("Sec-WebSocket-Key: {}".format(self.key))
headers.append("Origin: ws://{}:{}".format(self.host, self.port))
headers.append("Sec-WebSocket-Version: {}".format(WEBSOCKET_VERSION))
if upgrade:
headers.append("Sec-WebSocket-Protocol: {}".format(
WEBSOCKET_SUBPROTOCOLS)
)
logger.debug("connection headers: %s", headers)
return headers | python | def _get_handshake_headers(self, upgrade):
""" Do an HTTP upgrade handshake with the server.
Websockets upgrade from HTTP rather than TCP largely because it was
assumed that servers which provide websockets will always be talking to
a browser. Maybe a reasonable assumption once upon a time...
The headers here will go a little further and also agree the
WAMP websocket JSON subprotocols.
"""
headers = []
# https://tools.ietf.org/html/rfc6455
headers.append("GET {} HTTP/1.1".format(self.websocket_location))
headers.append("Host: {}:{}".format(self.host, self.port))
headers.append("Upgrade: websocket")
headers.append("Connection: Upgrade")
# Sec-WebSocket-Key header containing base64-encoded random bytes,
# and the server replies with a hash of the key in the
# Sec-WebSocket-Accept header. This is intended to prevent a caching
# proxy from re-sending a previous WebSocket conversation and does not
# provide any authentication, privacy or integrity
headers.append("Sec-WebSocket-Key: {}".format(self.key))
headers.append("Origin: ws://{}:{}".format(self.host, self.port))
headers.append("Sec-WebSocket-Version: {}".format(WEBSOCKET_VERSION))
if upgrade:
headers.append("Sec-WebSocket-Protocol: {}".format(
WEBSOCKET_SUBPROTOCOLS)
)
logger.debug("connection headers: %s", headers)
return headers | [
"def",
"_get_handshake_headers",
"(",
"self",
",",
"upgrade",
")",
":",
"headers",
"=",
"[",
"]",
"# https://tools.ietf.org/html/rfc6455",
"headers",
".",
"append",
"(",
"\"GET {} HTTP/1.1\"",
".",
"format",
"(",
"self",
".",
"websocket_location",
")",
")",
"heade... | Do an HTTP upgrade handshake with the server.
Websockets upgrade from HTTP rather than TCP largely because it was
assumed that servers which provide websockets will always be talking to
a browser. Maybe a reasonable assumption once upon a time...
The headers here will go a little further and also agree the
WAMP websocket JSON subprotocols. | [
"Do",
"an",
"HTTP",
"upgrade",
"handshake",
"with",
"the",
"server",
"."
] | 7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf | https://github.com/noisyboiler/wampy/blob/7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf/wampy/transports/websocket/connection.py#L183-L216 | train | 42,443 |
noisyboiler/wampy | wampy/transports/websocket/frames.py | FrameFactory.generate_mask | def generate_mask(cls, mask_key, data):
""" Mask data.
:Parameters:
mask_key: byte string
4 byte string(byte), e.g. '\x10\xc6\xc4\x16'
data: str
data to mask
"""
# Masking of WebSocket traffic from client to server is required
# because of the unlikely chance that malicious code could cause
# some broken proxies to do the wrong thing and use this as an
# attack of some kind. Nobody has proved that this could actually
# happen, but since the fact that it could happen was reason enough
# for browser vendors to get twitchy, masking was added to remove
# the possibility of it being used as an attack.
if data is None:
data = ""
data = bytearray(data, 'utf-8')
_m = array.array("B", mask_key)
_d = array.array("B", data)
for i in range(len(_d)):
_d[i] ^= _m[i % 4]
return _d.tostring() | python | def generate_mask(cls, mask_key, data):
""" Mask data.
:Parameters:
mask_key: byte string
4 byte string(byte), e.g. '\x10\xc6\xc4\x16'
data: str
data to mask
"""
# Masking of WebSocket traffic from client to server is required
# because of the unlikely chance that malicious code could cause
# some broken proxies to do the wrong thing and use this as an
# attack of some kind. Nobody has proved that this could actually
# happen, but since the fact that it could happen was reason enough
# for browser vendors to get twitchy, masking was added to remove
# the possibility of it being used as an attack.
if data is None:
data = ""
data = bytearray(data, 'utf-8')
_m = array.array("B", mask_key)
_d = array.array("B", data)
for i in range(len(_d)):
_d[i] ^= _m[i % 4]
return _d.tostring() | [
"def",
"generate_mask",
"(",
"cls",
",",
"mask_key",
",",
"data",
")",
":",
"# Masking of WebSocket traffic from client to server is required",
"# because of the unlikely chance that malicious code could cause",
"# some broken proxies to do the wrong thing and use this as an",
"# attack of... | Mask data.
:Parameters:
mask_key: byte string
4 byte string(byte), e.g. '\x10\xc6\xc4\x16'
data: str
data to mask | [
"Mask",
"data",
"."
] | 7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf | https://github.com/noisyboiler/wampy/blob/7c7ef246fec1b2bf3ec3a0e24c85c42fdd99d4bf/wampy/transports/websocket/frames.py#L194-L222 | train | 42,444 |
eddieantonio/perfection | perfection/forest.py | ForestGraph.edges | def edges(self):
"""
Edges of this graph, in canonical order.
"""
canonical_edges = set()
for v1, neighbours in self._vertices.items():
for v2 in neighbours:
edge = self.canonical_order((v1, v2))
canonical_edges.add(edge)
return canonical_edges | python | def edges(self):
"""
Edges of this graph, in canonical order.
"""
canonical_edges = set()
for v1, neighbours in self._vertices.items():
for v2 in neighbours:
edge = self.canonical_order((v1, v2))
canonical_edges.add(edge)
return canonical_edges | [
"def",
"edges",
"(",
"self",
")",
":",
"canonical_edges",
"=",
"set",
"(",
")",
"for",
"v1",
",",
"neighbours",
"in",
"self",
".",
"_vertices",
".",
"items",
"(",
")",
":",
"for",
"v2",
"in",
"neighbours",
":",
"edge",
"=",
"self",
".",
"canonical_or... | Edges of this graph, in canonical order. | [
"Edges",
"of",
"this",
"graph",
"in",
"canonical",
"order",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/forest.py#L104-L113 | train | 42,445 |
eddieantonio/perfection | perfection/czech.py | ordered_deduplicate | def ordered_deduplicate(sequence):
"""
Returns the sequence as a tuple with the duplicates removed,
preserving input order. Any duplicates following the first
occurrence are removed.
>>> ordered_deduplicate([1, 2, 3, 1, 32, 1, 2])
(1, 2, 3, 32)
Based on recipe from this StackOverflow post:
http://stackoverflow.com/a/480227
"""
seen = set()
# Micro optimization: each call to seen_add saves an extra attribute
# lookup in most iterations of the loop.
seen_add = seen.add
return tuple(x for x in sequence if not (x in seen or seen_add(x))) | python | def ordered_deduplicate(sequence):
"""
Returns the sequence as a tuple with the duplicates removed,
preserving input order. Any duplicates following the first
occurrence are removed.
>>> ordered_deduplicate([1, 2, 3, 1, 32, 1, 2])
(1, 2, 3, 32)
Based on recipe from this StackOverflow post:
http://stackoverflow.com/a/480227
"""
seen = set()
# Micro optimization: each call to seen_add saves an extra attribute
# lookup in most iterations of the loop.
seen_add = seen.add
return tuple(x for x in sequence if not (x in seen or seen_add(x))) | [
"def",
"ordered_deduplicate",
"(",
"sequence",
")",
":",
"seen",
"=",
"set",
"(",
")",
"# Micro optimization: each call to seen_add saves an extra attribute",
"# lookup in most iterations of the loop.",
"seen_add",
"=",
"seen",
".",
"add",
"return",
"tuple",
"(",
"x",
"fo... | Returns the sequence as a tuple with the duplicates removed,
preserving input order. Any duplicates following the first
occurrence are removed.
>>> ordered_deduplicate([1, 2, 3, 1, 32, 1, 2])
(1, 2, 3, 32)
Based on recipe from this StackOverflow post:
http://stackoverflow.com/a/480227 | [
"Returns",
"the",
"sequence",
"as",
"a",
"tuple",
"with",
"the",
"duplicates",
"removed",
"preserving",
"input",
"order",
".",
"Any",
"duplicates",
"following",
"the",
"first",
"occurrence",
"are",
"removed",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L194-L212 | train | 42,446 |
eddieantonio/perfection | perfection/czech.py | hash_parameters | def hash_parameters(words, minimize_indices=False):
"""
Gives hash parameters for the given set of words.
>>> info = hash_parameters('sun mon tue wed thu fri sat'.split())
>>> len(info.t1)
21
>>> len(info.t2)
21
>>> len(info.g) # g values are 1-indexed...
22
"""
# Ensure that we have an indexable sequence.
words = tuple(words)
# Delegate to the hash builder.
return CzechHashBuilder(words).hash_info | python | def hash_parameters(words, minimize_indices=False):
"""
Gives hash parameters for the given set of words.
>>> info = hash_parameters('sun mon tue wed thu fri sat'.split())
>>> len(info.t1)
21
>>> len(info.t2)
21
>>> len(info.g) # g values are 1-indexed...
22
"""
# Ensure that we have an indexable sequence.
words = tuple(words)
# Delegate to the hash builder.
return CzechHashBuilder(words).hash_info | [
"def",
"hash_parameters",
"(",
"words",
",",
"minimize_indices",
"=",
"False",
")",
":",
"# Ensure that we have an indexable sequence.",
"words",
"=",
"tuple",
"(",
"words",
")",
"# Delegate to the hash builder.",
"return",
"CzechHashBuilder",
"(",
"words",
")",
".",
... | Gives hash parameters for the given set of words.
>>> info = hash_parameters('sun mon tue wed thu fri sat'.split())
>>> len(info.t1)
21
>>> len(info.t2)
21
>>> len(info.g) # g values are 1-indexed...
22 | [
"Gives",
"hash",
"parameters",
"for",
"the",
"given",
"set",
"of",
"words",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L217-L233 | train | 42,447 |
eddieantonio/perfection | perfection/czech.py | make_pickable_hash | def make_pickable_hash(words, *args, **kwargs):
"""
Creates an ordered, minimal perfect hash function for the given sequence
of words.
>>> hf = make_pickable_hash(['sun', 'mon', 'tue', 'wed', 'thu',
... 'fri', 'sat'])
>>> hf('fri')
5
>>> hf('sun')
0
"""
return PickableHash(CzechHashBuilder(words, *args, **kwargs)).czech_hash | python | def make_pickable_hash(words, *args, **kwargs):
"""
Creates an ordered, minimal perfect hash function for the given sequence
of words.
>>> hf = make_pickable_hash(['sun', 'mon', 'tue', 'wed', 'thu',
... 'fri', 'sat'])
>>> hf('fri')
5
>>> hf('sun')
0
"""
return PickableHash(CzechHashBuilder(words, *args, **kwargs)).czech_hash | [
"def",
"make_pickable_hash",
"(",
"words",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"PickableHash",
"(",
"CzechHashBuilder",
"(",
"words",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
".",
"czech_hash"
] | Creates an ordered, minimal perfect hash function for the given sequence
of words.
>>> hf = make_pickable_hash(['sun', 'mon', 'tue', 'wed', 'thu',
... 'fri', 'sat'])
>>> hf('fri')
5
>>> hf('sun')
0 | [
"Creates",
"an",
"ordered",
"minimal",
"perfect",
"hash",
"function",
"for",
"the",
"given",
"sequence",
"of",
"words",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L272-L284 | train | 42,448 |
eddieantonio/perfection | perfection/czech.py | CzechHashBuilder.hash_function | def hash_function(self):
"""
Returns the hash function proper. Ensures that `self` is not bound to
the returned closure.
"""
assert hasattr(self, 'f1') and hasattr(self, 'f2')
# These are not just convenient aliases for the given
# attributes; if `self` would creep into the returned closure,
# that would ensure that a reference to this big, fat object
# would be kept alive; hence, any hash function would carry
# around all of the auxiliary state that was created during the
# generation of the hash parameters. Omitting `self` ensures
# this object has a chance to be garbage collected.
f1, f2, g = self.f1, self.f2, self.g
def czech_hash(word):
v1 = f1(word)
v2 = f2(word)
return g[v1] + g[v2]
return czech_hash | python | def hash_function(self):
"""
Returns the hash function proper. Ensures that `self` is not bound to
the returned closure.
"""
assert hasattr(self, 'f1') and hasattr(self, 'f2')
# These are not just convenient aliases for the given
# attributes; if `self` would creep into the returned closure,
# that would ensure that a reference to this big, fat object
# would be kept alive; hence, any hash function would carry
# around all of the auxiliary state that was created during the
# generation of the hash parameters. Omitting `self` ensures
# this object has a chance to be garbage collected.
f1, f2, g = self.f1, self.f2, self.g
def czech_hash(word):
v1 = f1(word)
v2 = f2(word)
return g[v1] + g[v2]
return czech_hash | [
"def",
"hash_function",
"(",
"self",
")",
":",
"assert",
"hasattr",
"(",
"self",
",",
"'f1'",
")",
"and",
"hasattr",
"(",
"self",
",",
"'f2'",
")",
"# These are not just convenient aliases for the given",
"# attributes; if `self` would creep into the returned closure,",
"... | Returns the hash function proper. Ensures that `self` is not bound to
the returned closure. | [
"Returns",
"the",
"hash",
"function",
"proper",
".",
"Ensures",
"that",
"self",
"is",
"not",
"bound",
"to",
"the",
"returned",
"closure",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L62-L83 | train | 42,449 |
eddieantonio/perfection | perfection/czech.py | CzechHashBuilder.generate_acyclic_graph | def generate_acyclic_graph(self):
"""
Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object.
"""
# Maximum length of each table, respectively.
# Hardcoded n = cm, where c = 3
# There might be a good way to choose an appropriate C,
# but [1] suggests the average amount of iterations needed
# to generate an acyclic graph is sqrt(3).
self.n = 3 * len(self.words)
max_tries = len(self.words) ** 2
for trial in range(max_tries):
try:
self.generate_or_fail()
except forest.InvariantError:
continue
else:
# Generated successfully!
self.trials_taken = trial + 1
return
raise RuntimeError("Could not generate graph in "
"{} tries".format(max_tries)) | python | def generate_acyclic_graph(self):
"""
Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object.
"""
# Maximum length of each table, respectively.
# Hardcoded n = cm, where c = 3
# There might be a good way to choose an appropriate C,
# but [1] suggests the average amount of iterations needed
# to generate an acyclic graph is sqrt(3).
self.n = 3 * len(self.words)
max_tries = len(self.words) ** 2
for trial in range(max_tries):
try:
self.generate_or_fail()
except forest.InvariantError:
continue
else:
# Generated successfully!
self.trials_taken = trial + 1
return
raise RuntimeError("Could not generate graph in "
"{} tries".format(max_tries)) | [
"def",
"generate_acyclic_graph",
"(",
"self",
")",
":",
"# Maximum length of each table, respectively.",
"# Hardcoded n = cm, where c = 3",
"# There might be a good way to choose an appropriate C,",
"# but [1] suggests the average amount of iterations needed",
"# to generate an acyclic graph is ... | Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object. | [
"Generates",
"an",
"acyclic",
"graph",
"for",
"the",
"given",
"words",
".",
"Adds",
"the",
"graph",
"and",
"a",
"list",
"of",
"edge",
"-",
"word",
"associations",
"to",
"the",
"object",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L87-L112 | train | 42,450 |
eddieantonio/perfection | perfection/czech.py | CzechHashBuilder.generate_random_table | def generate_random_table(self):
"""
Generates random tables for given word lists.
"""
table = list(range(0, self.n))
random.shuffle(table)
return table | python | def generate_random_table(self):
"""
Generates random tables for given word lists.
"""
table = list(range(0, self.n))
random.shuffle(table)
return table | [
"def",
"generate_random_table",
"(",
"self",
")",
":",
"table",
"=",
"list",
"(",
"range",
"(",
"0",
",",
"self",
".",
"n",
")",
")",
"random",
".",
"shuffle",
"(",
"table",
")",
"return",
"table"
] | Generates random tables for given word lists. | [
"Generates",
"random",
"tables",
"for",
"given",
"word",
"lists",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L114-L120 | train | 42,451 |
eddieantonio/perfection | perfection/czech.py | CzechHashBuilder.generate_or_fail | def generate_or_fail(self):
"""
Attempts to generate a random acyclic graph, raising an
InvariantError if unable to.
"""
t1 = self.generate_random_table()
t2 = self.generate_random_table()
f1 = self.generate_func(t1)
f2 = self.generate_func(t2)
edges = [(f1(word), f2(word)) for word in self.words]
# Try to generate that graph, mack!
# Note that failure to generate the graph here should be caught
# by the caller.
graph = forest.ForestGraph(edges=edges)
# Associate each edge with its corresponding word.
associations = {}
for num in range(len(self.words)):
edge = edges[num]
word = self.words[num]
associations[graph.canonical_order(edge)] = (num, word)
# Assign all of these to the object.
for name in ('t1', 't2', 'f1', 'f2', 'graph', 'associations'):
self.__dict__[name] = locals()[name] | python | def generate_or_fail(self):
"""
Attempts to generate a random acyclic graph, raising an
InvariantError if unable to.
"""
t1 = self.generate_random_table()
t2 = self.generate_random_table()
f1 = self.generate_func(t1)
f2 = self.generate_func(t2)
edges = [(f1(word), f2(word)) for word in self.words]
# Try to generate that graph, mack!
# Note that failure to generate the graph here should be caught
# by the caller.
graph = forest.ForestGraph(edges=edges)
# Associate each edge with its corresponding word.
associations = {}
for num in range(len(self.words)):
edge = edges[num]
word = self.words[num]
associations[graph.canonical_order(edge)] = (num, word)
# Assign all of these to the object.
for name in ('t1', 't2', 'f1', 'f2', 'graph', 'associations'):
self.__dict__[name] = locals()[name] | [
"def",
"generate_or_fail",
"(",
"self",
")",
":",
"t1",
"=",
"self",
".",
"generate_random_table",
"(",
")",
"t2",
"=",
"self",
".",
"generate_random_table",
"(",
")",
"f1",
"=",
"self",
".",
"generate_func",
"(",
"t1",
")",
"f2",
"=",
"self",
".",
"ge... | Attempts to generate a random acyclic graph, raising an
InvariantError if unable to. | [
"Attempts",
"to",
"generate",
"a",
"random",
"acyclic",
"graph",
"raising",
"an",
"InvariantError",
"if",
"unable",
"to",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L122-L148 | train | 42,452 |
eddieantonio/perfection | perfection/czech.py | CzechHashBuilder.generate_func | def generate_func(self, table):
"""
Generates a random table based mini-hashing function.
"""
# Ensure that `self` isn't suddenly in the closure...
n = self.n
def func(word):
return sum(x * ord(c) for x, c in zip(table, word)) % n
return func | python | def generate_func(self, table):
"""
Generates a random table based mini-hashing function.
"""
# Ensure that `self` isn't suddenly in the closure...
n = self.n
def func(word):
return sum(x * ord(c) for x, c in zip(table, word)) % n
return func | [
"def",
"generate_func",
"(",
"self",
",",
"table",
")",
":",
"# Ensure that `self` isn't suddenly in the closure...",
"n",
"=",
"self",
".",
"n",
"def",
"func",
"(",
"word",
")",
":",
"return",
"sum",
"(",
"x",
"*",
"ord",
"(",
"c",
")",
"for",
"x",
",",... | Generates a random table based mini-hashing function. | [
"Generates",
"a",
"random",
"table",
"based",
"mini",
"-",
"hashing",
"function",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/czech.py#L150-L161 | train | 42,453 |
eddieantonio/perfection | perfection/utils.py | create_dict_subclass | def create_dict_subclass(name, hash_func, slots, doc):
"""
Creates a dict subclass named name, using the hash_function to index
hash_length items. Doc should be any additional documentation added to the
class.
"""
hash_length = len(slots)
# Returns array index -- raises a KeyError if the key does not match
# its slot value.
def index_or_key_error(key):
index = hash_func(key)
# Make sure the key is **exactly** the same.
if key != slots[index]:
raise KeyError(key)
return index
def init(self, *args, **kwargs):
self._arr = [None] * hash_length
self._len = 0
# Delegate initialization to update provided by MutableMapping:
self.update(*args, **kwargs)
def getitem(self, key):
index = index_or_key_error(key)
if self._arr[index] is None:
raise KeyError(key)
return self._arr[index][1]
def setitem(self, key, value):
index = index_or_key_error(key)
self._arr[index] = (key, value)
def delitem(self, key):
index = index_or_key_error(key)
if self._arr[index] is None:
raise KeyError(key)
self._arr[index] = None
def dict_iter(self):
return (pair[0] for pair in self._arr if pair is not None)
def dict_len(self):
# TODO: Make this O(1) using auxiliary state?
return sum(1 for _ in self)
def dict_repr(self):
arr_repr = (repr(pair) for pair in self._arr if pair is not None)
return ''.join((name, '([', ', '.join(arr_repr), '])'))
# Inheriting from MutableMapping gives us a whole whackload of methods for
# free.
bases = (collections.MutableMapping,)
return type(name, bases, {
'__init__': init,
'__doc__': doc,
'__getitem__': getitem,
'__setitem__': setitem,
'__delitem__': delitem,
'__iter__': dict_iter,
'__len__': dict_len,
'__repr__': dict_repr,
}) | python | def create_dict_subclass(name, hash_func, slots, doc):
"""
Creates a dict subclass named name, using the hash_function to index
hash_length items. Doc should be any additional documentation added to the
class.
"""
hash_length = len(slots)
# Returns array index -- raises a KeyError if the key does not match
# its slot value.
def index_or_key_error(key):
index = hash_func(key)
# Make sure the key is **exactly** the same.
if key != slots[index]:
raise KeyError(key)
return index
def init(self, *args, **kwargs):
self._arr = [None] * hash_length
self._len = 0
# Delegate initialization to update provided by MutableMapping:
self.update(*args, **kwargs)
def getitem(self, key):
index = index_or_key_error(key)
if self._arr[index] is None:
raise KeyError(key)
return self._arr[index][1]
def setitem(self, key, value):
index = index_or_key_error(key)
self._arr[index] = (key, value)
def delitem(self, key):
index = index_or_key_error(key)
if self._arr[index] is None:
raise KeyError(key)
self._arr[index] = None
def dict_iter(self):
return (pair[0] for pair in self._arr if pair is not None)
def dict_len(self):
# TODO: Make this O(1) using auxiliary state?
return sum(1 for _ in self)
def dict_repr(self):
arr_repr = (repr(pair) for pair in self._arr if pair is not None)
return ''.join((name, '([', ', '.join(arr_repr), '])'))
# Inheriting from MutableMapping gives us a whole whackload of methods for
# free.
bases = (collections.MutableMapping,)
return type(name, bases, {
'__init__': init,
'__doc__': doc,
'__getitem__': getitem,
'__setitem__': setitem,
'__delitem__': delitem,
'__iter__': dict_iter,
'__len__': dict_len,
'__repr__': dict_repr,
}) | [
"def",
"create_dict_subclass",
"(",
"name",
",",
"hash_func",
",",
"slots",
",",
"doc",
")",
":",
"hash_length",
"=",
"len",
"(",
"slots",
")",
"# Returns array index -- raises a KeyError if the key does not match",
"# its slot value.",
"def",
"index_or_key_error",
"(",
... | Creates a dict subclass named name, using the hash_function to index
hash_length items. Doc should be any additional documentation added to the
class. | [
"Creates",
"a",
"dict",
"subclass",
"named",
"name",
"using",
"the",
"hash_function",
"to",
"index",
"hash_length",
"items",
".",
"Doc",
"should",
"be",
"any",
"additional",
"documentation",
"added",
"to",
"the",
"class",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/utils.py#L10-L77 | train | 42,454 |
karimbahgat/PyGeoj | pygeoj.py | validate | def validate(data, skiperrors=False, fixerrors=True):
"""Checks that the geojson data is a feature collection, that it
contains a proper "features" attribute, and that all features are valid too.
Returns True if all goes well.
- skiperrors will throw away any features that fail to validate.
- fixerrors will attempt to auto fix any minor errors without raising exceptions.
"""
if not "type" in data:
if fixerrors:
data["type"] = "FeatureCollection"
else:
raise ValueError("The geojson data needs to have a type key")
if not data["type"] == "FeatureCollection":
if fixerrors:
data["type"] = "FeatureCollection"
else:
raise ValueError("The geojson data needs to be a feature collection")
if "features" in data:
if not isinstance(data["features"], list):
raise ValueError("The features property needs to be a list")
else: raise ValueError("The FeatureCollection needs to contain a 'features' property")
if skiperrors:
for featuredict in data["features"]:
feat = Feature(featuredict)
try: feat.validate(fixerrors)
except: data["features"].remove(featuredict)
else:
for featuredict in data["features"]:
feat = Feature(featuredict)
feat.validate(fixerrors)
return True | python | def validate(data, skiperrors=False, fixerrors=True):
"""Checks that the geojson data is a feature collection, that it
contains a proper "features" attribute, and that all features are valid too.
Returns True if all goes well.
- skiperrors will throw away any features that fail to validate.
- fixerrors will attempt to auto fix any minor errors without raising exceptions.
"""
if not "type" in data:
if fixerrors:
data["type"] = "FeatureCollection"
else:
raise ValueError("The geojson data needs to have a type key")
if not data["type"] == "FeatureCollection":
if fixerrors:
data["type"] = "FeatureCollection"
else:
raise ValueError("The geojson data needs to be a feature collection")
if "features" in data:
if not isinstance(data["features"], list):
raise ValueError("The features property needs to be a list")
else: raise ValueError("The FeatureCollection needs to contain a 'features' property")
if skiperrors:
for featuredict in data["features"]:
feat = Feature(featuredict)
try: feat.validate(fixerrors)
except: data["features"].remove(featuredict)
else:
for featuredict in data["features"]:
feat = Feature(featuredict)
feat.validate(fixerrors)
return True | [
"def",
"validate",
"(",
"data",
",",
"skiperrors",
"=",
"False",
",",
"fixerrors",
"=",
"True",
")",
":",
"if",
"not",
"\"type\"",
"in",
"data",
":",
"if",
"fixerrors",
":",
"data",
"[",
"\"type\"",
"]",
"=",
"\"FeatureCollection\"",
"else",
":",
"raise"... | Checks that the geojson data is a feature collection, that it
contains a proper "features" attribute, and that all features are valid too.
Returns True if all goes well.
- skiperrors will throw away any features that fail to validate.
- fixerrors will attempt to auto fix any minor errors without raising exceptions. | [
"Checks",
"that",
"the",
"geojson",
"data",
"is",
"a",
"feature",
"collection",
"that",
"it",
"contains",
"a",
"proper",
"features",
"attribute",
"and",
"that",
"all",
"features",
"are",
"valid",
"too",
".",
"Returns",
"True",
"if",
"all",
"goes",
"well",
... | b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5 | https://github.com/karimbahgat/PyGeoj/blob/b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5/pygeoj.py#L733-L768 | train | 42,455 |
karimbahgat/PyGeoj | pygeoj.py | Geometry.validate | def validate(self, fixerrors=True):
"""
Validates that the geometry is correctly formatted according to the geometry type.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the geometry is valid.
Raises:
- An Exception if not valid.
"""
# validate nullgeometry or has type and coordinates keys
if not self._data:
# null geometry, no further checking needed
return True
elif "type" not in self._data or "coordinates" not in self._data:
raise Exception("A geometry dictionary or instance must have the type and coordinates entries")
# first validate geometry type
if not self.type in ("Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"):
if fixerrors:
coretype = self.type.lower().replace("multi","")
if coretype == "point":
newtype = "Point"
elif coretype == "linestring":
newtype = "LineString"
elif coretype == "polygon":
newtype = "Polygon"
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
if self.type.lower().startswith("multi"):
newtype = "Multi" + newtype
self.type = newtype
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
# then validate coordinate data type
coords = self._data["coordinates"]
if not isinstance(coords, (list,tuple)): raise Exception("Coordinates must be a list or tuple type")
# then validate coordinate structures
if self.type == "Point":
if not len(coords) == 2: raise Exception("Point must be one coordinate pair")
elif self.type in ("MultiPoint","LineString"):
if not len(coords) > 1: raise Exception("MultiPoint and LineString must have more than one coordinates")
elif self.type == "MultiLineString":
for line in coords:
if not len(line) > 1: raise Exception("All LineStrings in a MultiLineString must have more than one coordinate")
elif self.type == "Polygon":
for exterior_or_holes in coords:
if not len(exterior_or_holes) >= 3: raise Exception("The exterior and all holes in a Polygon must have at least 3 coordinates")
elif self.type == "MultiPolygon":
for eachmulti in coords:
for exterior_or_holes in eachmulti:
if not len(exterior_or_holes) >= 3: raise Exception("The exterior and all holes in all Polygons of a MultiPolygon must have at least 3 coordinates")
# validation successful
return True | python | def validate(self, fixerrors=True):
"""
Validates that the geometry is correctly formatted according to the geometry type.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the geometry is valid.
Raises:
- An Exception if not valid.
"""
# validate nullgeometry or has type and coordinates keys
if not self._data:
# null geometry, no further checking needed
return True
elif "type" not in self._data or "coordinates" not in self._data:
raise Exception("A geometry dictionary or instance must have the type and coordinates entries")
# first validate geometry type
if not self.type in ("Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"):
if fixerrors:
coretype = self.type.lower().replace("multi","")
if coretype == "point":
newtype = "Point"
elif coretype == "linestring":
newtype = "LineString"
elif coretype == "polygon":
newtype = "Polygon"
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
if self.type.lower().startswith("multi"):
newtype = "Multi" + newtype
self.type = newtype
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
# then validate coordinate data type
coords = self._data["coordinates"]
if not isinstance(coords, (list,tuple)): raise Exception("Coordinates must be a list or tuple type")
# then validate coordinate structures
if self.type == "Point":
if not len(coords) == 2: raise Exception("Point must be one coordinate pair")
elif self.type in ("MultiPoint","LineString"):
if not len(coords) > 1: raise Exception("MultiPoint and LineString must have more than one coordinates")
elif self.type == "MultiLineString":
for line in coords:
if not len(line) > 1: raise Exception("All LineStrings in a MultiLineString must have more than one coordinate")
elif self.type == "Polygon":
for exterior_or_holes in coords:
if not len(exterior_or_holes) >= 3: raise Exception("The exterior and all holes in a Polygon must have at least 3 coordinates")
elif self.type == "MultiPolygon":
for eachmulti in coords:
for exterior_or_holes in eachmulti:
if not len(exterior_or_holes) >= 3: raise Exception("The exterior and all holes in all Polygons of a MultiPolygon must have at least 3 coordinates")
# validation successful
return True | [
"def",
"validate",
"(",
"self",
",",
"fixerrors",
"=",
"True",
")",
":",
"# validate nullgeometry or has type and coordinates keys",
"if",
"not",
"self",
".",
"_data",
":",
"# null geometry, no further checking needed",
"return",
"True",
"elif",
"\"type\"",
"not",
"in",... | Validates that the geometry is correctly formatted according to the geometry type.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the geometry is valid.
Raises:
- An Exception if not valid. | [
"Validates",
"that",
"the",
"geometry",
"is",
"correctly",
"formatted",
"according",
"to",
"the",
"geometry",
"type",
"."
] | b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5 | https://github.com/karimbahgat/PyGeoj/blob/b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5/pygeoj.py#L259-L324 | train | 42,456 |
karimbahgat/PyGeoj | pygeoj.py | Feature.validate | def validate(self, fixerrors=True):
"""
Validates that the feature is correctly formatted.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the feature is valid.
Raises:
- An Exception if not valid.
"""
if not "type" in self._data or self._data["type"] != "Feature":
if fixerrors:
self._data["type"] = "Feature"
else:
raise Exception("A geojson feature dictionary must contain a type key and it must be named 'Feature'.")
if not "geometry" in self._data:
if fixerrors:
self.geometry = Geometry() # nullgeometry
else:
raise Exception("A geojson feature dictionary must contain a geometry key.")
if not "properties" in self._data or not isinstance(self.properties,dict):
if fixerrors:
self._data["properties"] = dict()
else:
raise Exception("A geojson feature dictionary must contain a properties key and it must be a dictionary type.")
self.geometry.validate(fixerrors)
return True | python | def validate(self, fixerrors=True):
"""
Validates that the feature is correctly formatted.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the feature is valid.
Raises:
- An Exception if not valid.
"""
if not "type" in self._data or self._data["type"] != "Feature":
if fixerrors:
self._data["type"] = "Feature"
else:
raise Exception("A geojson feature dictionary must contain a type key and it must be named 'Feature'.")
if not "geometry" in self._data:
if fixerrors:
self.geometry = Geometry() # nullgeometry
else:
raise Exception("A geojson feature dictionary must contain a geometry key.")
if not "properties" in self._data or not isinstance(self.properties,dict):
if fixerrors:
self._data["properties"] = dict()
else:
raise Exception("A geojson feature dictionary must contain a properties key and it must be a dictionary type.")
self.geometry.validate(fixerrors)
return True | [
"def",
"validate",
"(",
"self",
",",
"fixerrors",
"=",
"True",
")",
":",
"if",
"not",
"\"type\"",
"in",
"self",
".",
"_data",
"or",
"self",
".",
"_data",
"[",
"\"type\"",
"]",
"!=",
"\"Feature\"",
":",
"if",
"fixerrors",
":",
"self",
".",
"_data",
"[... | Validates that the feature is correctly formatted.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the feature is valid.
Raises:
- An Exception if not valid. | [
"Validates",
"that",
"the",
"feature",
"is",
"correctly",
"formatted",
"."
] | b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5 | https://github.com/karimbahgat/PyGeoj/blob/b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5/pygeoj.py#L392-L424 | train | 42,457 |
karimbahgat/PyGeoj | pygeoj.py | GeojsonFile.add_feature | def add_feature(self, obj=None, geometry=None, properties=None):
"""
Adds a given feature. If obj isn't specified, geometry and properties can be set as arguments directly.
Parameters:
- **obj**: Another feature instance, an object with the \_\_geo_interface__ or a geojson dictionary of the Feature type.
- **geometry** (optional): Anything that the Geometry instance can accept.
- **properties** (optional): A dictionary of key-value property pairs.
"""
properties = properties or {}
if isinstance(obj, Feature):
# instead of creating copy, the original feat should reference the same one that was added here
feat = obj._data
elif isinstance(obj, dict):
feat = obj.copy()
else:
feat = Feature(geometry=geometry, properties=properties).__geo_interface__
self._data["features"].append(feat) | python | def add_feature(self, obj=None, geometry=None, properties=None):
"""
Adds a given feature. If obj isn't specified, geometry and properties can be set as arguments directly.
Parameters:
- **obj**: Another feature instance, an object with the \_\_geo_interface__ or a geojson dictionary of the Feature type.
- **geometry** (optional): Anything that the Geometry instance can accept.
- **properties** (optional): A dictionary of key-value property pairs.
"""
properties = properties or {}
if isinstance(obj, Feature):
# instead of creating copy, the original feat should reference the same one that was added here
feat = obj._data
elif isinstance(obj, dict):
feat = obj.copy()
else:
feat = Feature(geometry=geometry, properties=properties).__geo_interface__
self._data["features"].append(feat) | [
"def",
"add_feature",
"(",
"self",
",",
"obj",
"=",
"None",
",",
"geometry",
"=",
"None",
",",
"properties",
"=",
"None",
")",
":",
"properties",
"=",
"properties",
"or",
"{",
"}",
"if",
"isinstance",
"(",
"obj",
",",
"Feature",
")",
":",
"# instead of... | Adds a given feature. If obj isn't specified, geometry and properties can be set as arguments directly.
Parameters:
- **obj**: Another feature instance, an object with the \_\_geo_interface__ or a geojson dictionary of the Feature type.
- **geometry** (optional): Anything that the Geometry instance can accept.
- **properties** (optional): A dictionary of key-value property pairs. | [
"Adds",
"a",
"given",
"feature",
".",
"If",
"obj",
"isn",
"t",
"specified",
"geometry",
"and",
"properties",
"can",
"be",
"set",
"as",
"arguments",
"directly",
"."
] | b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5 | https://github.com/karimbahgat/PyGeoj/blob/b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5/pygeoj.py#L550-L568 | train | 42,458 |
karimbahgat/PyGeoj | pygeoj.py | GeojsonFile.add_unique_id | def add_unique_id(self):
"""
Adds a unique id property to each feature.
Raises:
- An Exception if any of the features already
have an "id" field.
"""
uid = 0
for feature in self._data["features"]:
if feature["properties"].get("id"):
raise Exception("one of the features already had an id field")
feature["properties"]["id"] = uid
uid += 1 | python | def add_unique_id(self):
"""
Adds a unique id property to each feature.
Raises:
- An Exception if any of the features already
have an "id" field.
"""
uid = 0
for feature in self._data["features"]:
if feature["properties"].get("id"):
raise Exception("one of the features already had an id field")
feature["properties"]["id"] = uid
uid += 1 | [
"def",
"add_unique_id",
"(",
"self",
")",
":",
"uid",
"=",
"0",
"for",
"feature",
"in",
"self",
".",
"_data",
"[",
"\"features\"",
"]",
":",
"if",
"feature",
"[",
"\"properties\"",
"]",
".",
"get",
"(",
"\"id\"",
")",
":",
"raise",
"Exception",
"(",
... | Adds a unique id property to each feature.
Raises:
- An Exception if any of the features already
have an "id" field. | [
"Adds",
"a",
"unique",
"id",
"property",
"to",
"each",
"feature",
"."
] | b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5 | https://github.com/karimbahgat/PyGeoj/blob/b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5/pygeoj.py#L666-L681 | train | 42,459 |
karimbahgat/PyGeoj | pygeoj.py | GeojsonFile.add_all_bboxes | def add_all_bboxes(self):
"""
Calculates and adds a bbox attribute to the geojson entry of all feature geometries, updating any existing ones.
"""
for feature in self:
if feature.geometry.type != "Null":
feature.geometry._data["bbox"] = Feature(feature).geometry.bbox | python | def add_all_bboxes(self):
"""
Calculates and adds a bbox attribute to the geojson entry of all feature geometries, updating any existing ones.
"""
for feature in self:
if feature.geometry.type != "Null":
feature.geometry._data["bbox"] = Feature(feature).geometry.bbox | [
"def",
"add_all_bboxes",
"(",
"self",
")",
":",
"for",
"feature",
"in",
"self",
":",
"if",
"feature",
".",
"geometry",
".",
"type",
"!=",
"\"Null\"",
":",
"feature",
".",
"geometry",
".",
"_data",
"[",
"\"bbox\"",
"]",
"=",
"Feature",
"(",
"feature",
"... | Calculates and adds a bbox attribute to the geojson entry of all feature geometries, updating any existing ones. | [
"Calculates",
"and",
"adds",
"a",
"bbox",
"attribute",
"to",
"the",
"geojson",
"entry",
"of",
"all",
"feature",
"geometries",
"updating",
"any",
"existing",
"ones",
"."
] | b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5 | https://github.com/karimbahgat/PyGeoj/blob/b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5/pygeoj.py#L683-L689 | train | 42,460 |
karimbahgat/PyGeoj | pygeoj.py | GeojsonFile.save | def save(self, savepath, **kwargs):
"""
Saves the geojson instance to file. To save with a different text encoding use the 'encoding' argument.
Parameters:
- **savepath**: Filepath to save the file.
"""
self.update_bbox()
tempfile = open(savepath,"w")
json.dump(self._data, tempfile, **kwargs)
tempfile.close() | python | def save(self, savepath, **kwargs):
"""
Saves the geojson instance to file. To save with a different text encoding use the 'encoding' argument.
Parameters:
- **savepath**: Filepath to save the file.
"""
self.update_bbox()
tempfile = open(savepath,"w")
json.dump(self._data, tempfile, **kwargs)
tempfile.close() | [
"def",
"save",
"(",
"self",
",",
"savepath",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"update_bbox",
"(",
")",
"tempfile",
"=",
"open",
"(",
"savepath",
",",
"\"w\"",
")",
"json",
".",
"dump",
"(",
"self",
".",
"_data",
",",
"tempfile",
",",... | Saves the geojson instance to file. To save with a different text encoding use the 'encoding' argument.
Parameters:
- **savepath**: Filepath to save the file. | [
"Saves",
"the",
"geojson",
"instance",
"to",
"file",
".",
"To",
"save",
"with",
"a",
"different",
"text",
"encoding",
"use",
"the",
"encoding",
"argument",
"."
] | b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5 | https://github.com/karimbahgat/PyGeoj/blob/b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5/pygeoj.py#L691-L703 | train | 42,461 |
karimbahgat/PyGeoj | pygeoj.py | GeojsonFile._prepdata | def _prepdata(self):
"""Adds potentially missing items to the geojson dictionary"""
# if missing, compute and add bbox
if not self._data.get("bbox"):
self.update_bbox()
# if missing, set crs to default crs (WGS84), see http://geojson.org/geojson-spec.html
if not self._data.get("crs"):
self._data["crs"] = {"type":"name",
"properties":{"name":"urn:ogc:def:crs:OGC:2:84"}} | python | def _prepdata(self):
"""Adds potentially missing items to the geojson dictionary"""
# if missing, compute and add bbox
if not self._data.get("bbox"):
self.update_bbox()
# if missing, set crs to default crs (WGS84), see http://geojson.org/geojson-spec.html
if not self._data.get("crs"):
self._data["crs"] = {"type":"name",
"properties":{"name":"urn:ogc:def:crs:OGC:2:84"}} | [
"def",
"_prepdata",
"(",
"self",
")",
":",
"# if missing, compute and add bbox",
"if",
"not",
"self",
".",
"_data",
".",
"get",
"(",
"\"bbox\"",
")",
":",
"self",
".",
"update_bbox",
"(",
")",
"# if missing, set crs to default crs (WGS84), see http://geojson.org/geojson... | Adds potentially missing items to the geojson dictionary | [
"Adds",
"potentially",
"missing",
"items",
"to",
"the",
"geojson",
"dictionary"
] | b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5 | https://github.com/karimbahgat/PyGeoj/blob/b0f4e5f6a2d8289526fc7ee64a7b48d7b9a622a5/pygeoj.py#L717-L727 | train | 42,462 |
eddieantonio/perfection | perfection/getty.py | place_items_in_square | def place_items_in_square(items, t):
"""
Returns a list of rows that are stored as a priority queue to be
used with heapq functions.
>>> place_items_in_square([1,5,7], 4)
[(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
>>> place_items_in_square([1,5,7], 3)
[(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])]
"""
# A minheap (because that's all that heapq supports :/)
# of the length of each row. Why this is important is because
# we'll be popping the largest rows when figuring out row displacements.
# Each item is a tuple of (t - |row|, y, [(xpos_1, item_1), ...]).
# Until the call to heapq.heapify(), the rows are ordered in
# increasing row number (y).
rows = [(t, y, []) for y in range(t)]
for item in items:
# Calculate the cell the item should fall in.
x = item % t
y = item // t
# Push the item to its corresponding row...
inverse_length, _, row_contents = rows[y]
heapq.heappush(row_contents, (x, item))
# Ensure the heap key is kept intact.
rows[y] = inverse_length - 1, y, row_contents
assert all(inv_len == t - len(rows) for inv_len, _, rows in rows)
heapq.heapify(rows)
# Return only rows that are populated.
return [row for row in rows if row[2]] | python | def place_items_in_square(items, t):
"""
Returns a list of rows that are stored as a priority queue to be
used with heapq functions.
>>> place_items_in_square([1,5,7], 4)
[(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
>>> place_items_in_square([1,5,7], 3)
[(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])]
"""
# A minheap (because that's all that heapq supports :/)
# of the length of each row. Why this is important is because
# we'll be popping the largest rows when figuring out row displacements.
# Each item is a tuple of (t - |row|, y, [(xpos_1, item_1), ...]).
# Until the call to heapq.heapify(), the rows are ordered in
# increasing row number (y).
rows = [(t, y, []) for y in range(t)]
for item in items:
# Calculate the cell the item should fall in.
x = item % t
y = item // t
# Push the item to its corresponding row...
inverse_length, _, row_contents = rows[y]
heapq.heappush(row_contents, (x, item))
# Ensure the heap key is kept intact.
rows[y] = inverse_length - 1, y, row_contents
assert all(inv_len == t - len(rows) for inv_len, _, rows in rows)
heapq.heapify(rows)
# Return only rows that are populated.
return [row for row in rows if row[2]] | [
"def",
"place_items_in_square",
"(",
"items",
",",
"t",
")",
":",
"# A minheap (because that's all that heapq supports :/)",
"# of the length of each row. Why this is important is because",
"# we'll be popping the largest rows when figuring out row displacements.",
"# Each item is a tuple of (... | Returns a list of rows that are stored as a priority queue to be
used with heapq functions.
>>> place_items_in_square([1,5,7], 4)
[(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
>>> place_items_in_square([1,5,7], 3)
[(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])] | [
"Returns",
"a",
"list",
"of",
"rows",
"that",
"are",
"stored",
"as",
"a",
"priority",
"queue",
"to",
"be",
"used",
"with",
"heapq",
"functions",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/getty.py#L118-L154 | train | 42,463 |
eddieantonio/perfection | perfection/getty.py | find_first_fit | def find_first_fit(unoccupied_columns, row, row_length):
"""
Finds the first index that the row's items can fit.
"""
for free_col in unoccupied_columns:
# The offset is that such that the first item goes in the free column.
first_item_x = row[0][0]
offset = free_col - first_item_x
if check_columns_fit(unoccupied_columns, row, offset, row_length):
return offset
raise ValueError("Row cannot bossily fit in %r: %r"
% (list(unoccupied_columns.keys()), row)) | python | def find_first_fit(unoccupied_columns, row, row_length):
"""
Finds the first index that the row's items can fit.
"""
for free_col in unoccupied_columns:
# The offset is that such that the first item goes in the free column.
first_item_x = row[0][0]
offset = free_col - first_item_x
if check_columns_fit(unoccupied_columns, row, offset, row_length):
return offset
raise ValueError("Row cannot bossily fit in %r: %r"
% (list(unoccupied_columns.keys()), row)) | [
"def",
"find_first_fit",
"(",
"unoccupied_columns",
",",
"row",
",",
"row_length",
")",
":",
"for",
"free_col",
"in",
"unoccupied_columns",
":",
"# The offset is that such that the first item goes in the free column.",
"first_item_x",
"=",
"row",
"[",
"0",
"]",
"[",
"0"... | Finds the first index that the row's items can fit. | [
"Finds",
"the",
"first",
"index",
"that",
"the",
"row",
"s",
"items",
"can",
"fit",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/getty.py#L205-L217 | train | 42,464 |
eddieantonio/perfection | perfection/getty.py | check_columns_fit | def check_columns_fit(unoccupied_columns, row, offset, row_length):
"""
Checks if all the occupied columns in the row fit in the indices
given by free columns.
>>> check_columns_fit({0,1,2,3}, [(0, True), (2, True)], 0, 4)
True
>>> check_columns_fit({0,2,3}, [(2, True), (3, True)], 0, 4)
True
>>> check_columns_fit({}, [(2, True), (3, True)], 0, 4)
False
>>> check_columns_fit({0}, [(2, True)], 2, 4)
True
>>> check_columns_fit({0}, [(3, True)], 2, 4)
False
"""
for index, item in row:
adjusted_index = (index + offset) % row_length
# Check if the index is in the appropriate place.
if adjusted_index not in unoccupied_columns:
return False
return True | python | def check_columns_fit(unoccupied_columns, row, offset, row_length):
"""
Checks if all the occupied columns in the row fit in the indices
given by free columns.
>>> check_columns_fit({0,1,2,3}, [(0, True), (2, True)], 0, 4)
True
>>> check_columns_fit({0,2,3}, [(2, True), (3, True)], 0, 4)
True
>>> check_columns_fit({}, [(2, True), (3, True)], 0, 4)
False
>>> check_columns_fit({0}, [(2, True)], 2, 4)
True
>>> check_columns_fit({0}, [(3, True)], 2, 4)
False
"""
for index, item in row:
adjusted_index = (index + offset) % row_length
# Check if the index is in the appropriate place.
if adjusted_index not in unoccupied_columns:
return False
return True | [
"def",
"check_columns_fit",
"(",
"unoccupied_columns",
",",
"row",
",",
"offset",
",",
"row_length",
")",
":",
"for",
"index",
",",
"item",
"in",
"row",
":",
"adjusted_index",
"=",
"(",
"index",
"+",
"offset",
")",
"%",
"row_length",
"# Check if the index is i... | Checks if all the occupied columns in the row fit in the indices
given by free columns.
>>> check_columns_fit({0,1,2,3}, [(0, True), (2, True)], 0, 4)
True
>>> check_columns_fit({0,2,3}, [(2, True), (3, True)], 0, 4)
True
>>> check_columns_fit({}, [(2, True), (3, True)], 0, 4)
False
>>> check_columns_fit({0}, [(2, True)], 2, 4)
True
>>> check_columns_fit({0}, [(3, True)], 2, 4)
False | [
"Checks",
"if",
"all",
"the",
"occupied",
"columns",
"in",
"the",
"row",
"fit",
"in",
"the",
"indices",
"given",
"by",
"free",
"columns",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/getty.py#L220-L244 | train | 42,465 |
def print_square(row_queue, t):
    """
    Print a row queue as its conceptual t-by-t square array.

    Occupied cells render right-aligned in three characters; empty
    cells render as '...'.
    """
    filled = {y: dict(row) for _, y, row in row_queue}
    for y in range(t):
        row = filled.get(y, {})
        cells = ['%3d' % row[x] if x in row else '...' for x in range(t)]
        print('| ' + ', '.join(cells) + ' |')
"""
Prints a row queue as its conceptual square array.
"""
occupied_rows = {y: row for _, y, row in row_queue}
empty_row = ', '.join('...' for _ in range(t))
for y in range(t):
print('|', end=' ')
if y not in occupied_rows:
print(empty_row, end=' ')
else:
row = dict(occupied_rows[y])
all_cols = ('%3d' % row[x] if x in row else '...'
for x in range(t))
print(', '.join(all_cols), end=' ')
print("|") | [
"def",
"print_square",
"(",
"row_queue",
",",
"t",
")",
":",
"occupied_rows",
"=",
"{",
"y",
":",
"row",
"for",
"_",
",",
"y",
",",
"row",
"in",
"row_queue",
"}",
"empty_row",
"=",
"', '",
".",
"join",
"(",
"'...'",
"for",
"_",
"in",
"range",
"(",
... | Prints a row queue as its conceptual square array. | [
"Prints",
"a",
"row",
"queue",
"as",
"its",
"conceptual",
"square",
"array",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/getty.py#L247-L264 | train | 42,466 |
def trim_nones_from_right(xs):
    """
    Return the list without all the Nones at the right end.

    >>> trim_nones_from_right([1, 2, None, 4, None, 5, None, None])
    [1, 2, None, 4, None, 5]
    >>> trim_nones_from_right([1, 2, 3])
    [1, 2, 3]
    >>> trim_nones_from_right([None, None])
    []
    >>> trim_nones_from_right([])
    []
    """
    # Scan from the right for the last non-None element.  The previous
    # implementation returned xs[:-i], which is wrong when there are no
    # trailing Nones (i == 0 makes xs[:-0] the EMPTY slice), raised
    # NameError on an empty list, and kept one None for an all-None list.
    end = len(xs)
    while end > 0 and xs[end - 1] is None:
        end -= 1
    return xs[:end]
"""
Returns the list without all the Nones at the right end.
>>> trim_nones_from_right([1, 2, None, 4, None, 5, None, None])
[1, 2, None, 4, None, 5]
"""
# Find the first element that does not contain none.
for i, item in enumerate(reversed(xs)):
if item is not None:
break
return xs[:-i] | [
"def",
"trim_nones_from_right",
"(",
"xs",
")",
":",
"# Find the first element that does not contain none.",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"reversed",
"(",
"xs",
")",
")",
":",
"if",
"item",
"is",
"not",
"None",
":",
"break",
"return",
"xs",... | Returns the list without all the Nones at the right end.
>>> trim_nones_from_right([1, 2, None, 4, None, 5, None, None])
[1, 2, None, 4, None, 5] | [
"Returns",
"the",
"list",
"without",
"all",
"the",
"Nones",
"at",
"the",
"right",
"end",
"."
] | 69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43 | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/getty.py#L267-L280 | train | 42,467 |
def _get_timestamp(dirname_full, remove):
    """
    Read the mtime recorded by the timestamp file inside *dirname_full*.

    Returns None when no timestamp file exists.  When ``remove`` is true
    and timestamp recording is enabled, the file is queued in
    OLD_TIMESTAMPS for deletion after a fresh timestamp is written.
    """
    record_filename = os.path.join(dirname_full, RECORD_FILENAME)
    if not os.path.exists(record_filename):
        return None
    mtime = os.stat(record_filename).st_mtime
    print('Found timestamp {}:{}'.format(dirname_full,
                                         datetime.fromtimestamp(mtime)))
    if remove and Settings.record_timestamp:
        OLD_TIMESTAMPS.add(record_filename)
    return mtime
"""
Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one.
"""
record_filename = os.path.join(dirname_full, RECORD_FILENAME)
if not os.path.exists(record_filename):
return None
mtime = os.stat(record_filename).st_mtime
mtime_str = datetime.fromtimestamp(mtime)
print('Found timestamp {}:{}'.format(dirname_full, mtime_str))
if Settings.record_timestamp and remove:
OLD_TIMESTAMPS.add(record_filename)
return mtime | [
"def",
"_get_timestamp",
"(",
"dirname_full",
",",
"remove",
")",
":",
"record_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname_full",
",",
"RECORD_FILENAME",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"record_filename",
")",
":"... | Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one. | [
"Get",
"the",
"timestamp",
"from",
"the",
"timestamp",
"file",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/timestamp.py#L16-L32 | train | 42,468 |
def _get_timestamp_cached(dirname_full, remove):
    """
    Memoized wrapper around _get_timestamp.

    Much quicker than re-reading the same timestamp files repeatedly;
    None results are cached as well.
    """
    try:
        return TIMESTAMP_CACHE[dirname_full]
    except KeyError:
        mtime = _get_timestamp(dirname_full, remove)
        TIMESTAMP_CACHE[dirname_full] = mtime
        return mtime
"""
Get the timestamp from the cache or fill the cache
Much quicker than reading the same files over and over
"""
if dirname_full not in TIMESTAMP_CACHE:
mtime = _get_timestamp(dirname_full, remove)
TIMESTAMP_CACHE[dirname_full] = mtime
return TIMESTAMP_CACHE[dirname_full] | [
"def",
"_get_timestamp_cached",
"(",
"dirname_full",
",",
"remove",
")",
":",
"if",
"dirname_full",
"not",
"in",
"TIMESTAMP_CACHE",
":",
"mtime",
"=",
"_get_timestamp",
"(",
"dirname_full",
",",
"remove",
")",
"TIMESTAMP_CACHE",
"[",
"dirname_full",
"]",
"=",
"m... | Get the timestamp from the cache or fill the cache
Much quicker than reading the same files over and over | [
"Get",
"the",
"timestamp",
"from",
"the",
"cache",
"or",
"fill",
"the",
"cache",
"Much",
"quicker",
"than",
"reading",
"the",
"same",
"files",
"over",
"and",
"over"
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/timestamp.py#L35-L43 | train | 42,469 |
def _max_timestamps(dirname_full, remove, compare_tstamp):
    """Return the later of a directory's recorded timestamp and *compare_tstamp*."""
    dir_tstamp = _get_timestamp_cached(dirname_full, remove)
    return max_none((dir_tstamp, compare_tstamp))
"""Compare a timestamp file to one passed in. Get the max."""
tstamp = _get_timestamp_cached(dirname_full, remove)
return max_none((tstamp, compare_tstamp)) | [
"def",
"_max_timestamps",
"(",
"dirname_full",
",",
"remove",
",",
"compare_tstamp",
")",
":",
"tstamp",
"=",
"_get_timestamp_cached",
"(",
"dirname_full",
",",
"remove",
")",
"return",
"max_none",
"(",
"(",
"tstamp",
",",
"compare_tstamp",
")",
")"
] | Compare a timestamp file to one passed in. Get the max. | [
"Compare",
"a",
"timestamp",
"file",
"to",
"one",
"passed",
"in",
".",
"Get",
"the",
"max",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/timestamp.py#L56-L59 | train | 42,470 |
def _get_parent_timestamp(dirname, mtime):
    """
    Fold in timestamps from every ancestor directory, up to the root.

    A timestamp recorded in a parent applies to all of its subdirectories.
    """
    parent = os.path.dirname(dirname)
    # combine the parent's own timestamp with the one passed in
    mtime = _max_timestamps(parent, False, mtime)
    if dirname == os.path.dirname(parent):
        # reached the filesystem root; nothing further up to consult
        return mtime
    return _get_parent_timestamp(parent, mtime)
"""
Get the timestamps up the directory tree. All the way to root.
Because they affect every subdirectory.
"""
parent_pathname = os.path.dirname(dirname)
# max between the parent timestamp the one passed in
mtime = _max_timestamps(parent_pathname, False, mtime)
if dirname != os.path.dirname(parent_pathname):
# this is only called if we're not at the root
mtime = _get_parent_timestamp(parent_pathname, mtime)
return mtime | [
"def",
"_get_parent_timestamp",
"(",
"dirname",
",",
"mtime",
")",
":",
"parent_pathname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"dirname",
")",
"# max between the parent timestamp the one passed in",
"mtime",
"=",
"_max_timestamps",
"(",
"parent_pathname",
","... | Get the timestamps up the directory tree. All the way to root.
Because they affect every subdirectory. | [
"Get",
"the",
"timestamps",
"up",
"the",
"directory",
"tree",
".",
"All",
"the",
"way",
"to",
"root",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/timestamp.py#L62-L77 | train | 42,471 |
def get_walk_after(filename, optimize_after=None):
    """
    Determine the mtime cutoff to compare *filename* against.

    A cutoff configured explicitly in Settings wins; otherwise one is
    derived from timestamp files along the directory tree.
    """
    explicit = Settings.optimize_after
    if explicit is not None:
        return explicit
    dirname = os.path.dirname(filename)
    if optimize_after is None:
        optimize_after = _get_parent_timestamp(dirname, None)
    return _max_timestamps(dirname, True, optimize_after)
"""
Figure out the which mtime to check against.
If we have to look up the path return that.
"""
if Settings.optimize_after is not None:
return Settings.optimize_after
dirname = os.path.dirname(filename)
if optimize_after is None:
optimize_after = _get_parent_timestamp(dirname, optimize_after)
return _max_timestamps(dirname, True, optimize_after) | [
"def",
"get_walk_after",
"(",
"filename",
",",
"optimize_after",
"=",
"None",
")",
":",
"if",
"Settings",
".",
"optimize_after",
"is",
"not",
"None",
":",
"return",
"Settings",
".",
"optimize_after",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
... | Figure out the which mtime to check against.
If we have to look up the path return that. | [
"Figure",
"out",
"the",
"which",
"mtime",
"to",
"check",
"against",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/timestamp.py#L80-L92 | train | 42,472 |
def record_timestamp(pathname_full):
    """Record the timestamp of running in a dotfile.

    Touches the RECORD_FILENAME marker inside *pathname_full* and removes
    stale markers queued in OLD_TIMESTAMPS beneath that path.  Does
    nothing in test or list-only mode, or when recording is disabled.
    """
    if Settings.test or Settings.list_only or not Settings.record_timestamp:
        return
    if not Settings.follow_symlinks and os.path.islink(pathname_full):
        if Settings.verbose:
            print('Not setting timestamp because not following symlinks')
        return
    if not os.path.isdir(pathname_full):
        if Settings.verbose:
            print('Not setting timestamp for a non-directory')
        return
    record_filename_full = os.path.join(pathname_full, RECORD_FILENAME)
    try:
        # opening for write creates the marker; utime refreshes its mtime
        with open(record_filename_full, 'w'):
            os.utime(record_filename_full, None)
        if Settings.verbose:
            print("Set timestamp: {}".format(record_filename_full))
        for fname in OLD_TIMESTAMPS:
            if fname.startswith(pathname_full) and \
                    fname != record_filename_full:
                # only remove timestamps below the current path
                # but don't remove the timestamp we just set!
                os.remove(fname)
                if Settings.verbose:
                    print('Removed old timestamp: {}'.format(fname))
    except IOError:
        print("Could not set timestamp in {}".format(pathname_full))
"""Record the timestamp of running in a dotfile."""
if Settings.test or Settings.list_only or not Settings.record_timestamp:
return
if not Settings.follow_symlinks and os.path.islink(pathname_full):
if Settings.verbose:
print('Not setting timestamp because not following symlinks')
return
if not os.path.isdir(pathname_full):
if Settings.verbose:
print('Not setting timestamp for a non-directory')
return
record_filename_full = os.path.join(pathname_full, RECORD_FILENAME)
try:
with open(record_filename_full, 'w'):
os.utime(record_filename_full, None)
if Settings.verbose:
print("Set timestamp: {}".format(record_filename_full))
for fname in OLD_TIMESTAMPS:
if fname.startswith(pathname_full) and \
fname != record_filename_full:
# only remove timestamps below the curent path
# but don't remove the timestamp we just set!
os.remove(fname)
if Settings.verbose:
print('Removed old timestamp: {}'.format(fname))
except IOError:
print("Could not set timestamp in {}".format(pathname_full)) | [
"def",
"record_timestamp",
"(",
"pathname_full",
")",
":",
"if",
"Settings",
".",
"test",
"or",
"Settings",
".",
"list_only",
"or",
"not",
"Settings",
".",
"record_timestamp",
":",
"return",
"if",
"not",
"Settings",
".",
"follow_symlinks",
"and",
"os",
".",
... | Record the timestamp of running in a dotfile. | [
"Record",
"the",
"timestamp",
"of",
"running",
"in",
"a",
"dotfile",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/timestamp.py#L95-L123 | train | 42,473 |
def walk_comic_archive(filename_full, image_format, optimize_after):
    """
    Optimize a comic archive.

    This is done mostly inline to use the master processes process pool
    for workers. And to avoid calling back up into walk from a dedicated
    module or format processor. It does mean that we block on uncompress
    and on waiting for the contents subprocesses to compress.

    Returns an async result for the final recompression (or for the
    skip report when the archive is not processed).
    """
    # uncompress archive
    tmp_dir, report_stats = comic.comic_archive_uncompress(filename_full,
                                                           image_format)
    # a (None, stats) pair means the archive was skipped or unreadable
    if tmp_dir is None and report_stats:
        return Settings.pool.apply_async(_comic_archive_skip,
                                         args=report_stats)
    # optimize contents of archive, comparing against the archive's
    # own mtime rather than any directory timestamp files
    archive_mtime = os.stat(filename_full).st_mtime
    result_set = walk_dir(tmp_dir, optimize_after, True, archive_mtime)
    # wait (block) for archive contents to optimize before recompressing
    nag_about_gifs = False
    for result in result_set:
        res = result.get()
        nag_about_gifs = nag_about_gifs or res.nag_about_gifs
    # recompress archive
    args = (filename_full, image_format, Settings, nag_about_gifs)
    return Settings.pool.apply_async(comic.comic_archive_compress,
                                     args=(args,))
"""
Optimize a comic archive.
This is done mostly inline to use the master processes process pool
for workers. And to avoid calling back up into walk from a dedicated
module or format processor. It does mean that we block on uncompress
and on waiting for the contents subprocesses to compress.
"""
# uncompress archive
tmp_dir, report_stats = comic.comic_archive_uncompress(filename_full,
image_format)
if tmp_dir is None and report_stats:
return Settings.pool.apply_async(_comic_archive_skip,
args=report_stats)
# optimize contents of archive
archive_mtime = os.stat(filename_full).st_mtime
result_set = walk_dir(tmp_dir, optimize_after, True, archive_mtime)
# wait for archive contents to optimize before recompressing
nag_about_gifs = False
for result in result_set:
res = result.get()
nag_about_gifs = nag_about_gifs or res.nag_about_gifs
# recompress archive
args = (filename_full, image_format, Settings, nag_about_gifs)
return Settings.pool.apply_async(comic.comic_archive_compress,
args=(args,)) | [
"def",
"walk_comic_archive",
"(",
"filename_full",
",",
"image_format",
",",
"optimize_after",
")",
":",
"# uncompress archive",
"tmp_dir",
",",
"report_stats",
"=",
"comic",
".",
"comic_archive_uncompress",
"(",
"filename_full",
",",
"image_format",
")",
"if",
"tmp_d... | Optimize a comic archive.
This is done mostly inline to use the master processes process pool
for workers. And to avoid calling back up into walk from a dedicated
module or format processor. It does mean that we block on uncompress
and on waiting for the contents subprocesses to compress. | [
"Optimize",
"a",
"comic",
"archive",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/walk.py#L16-L45 | train | 42,474 |
def _is_skippable(filename_full):
    """Return True for paths picopt should not process at all."""
    # symlinks are skipped unless the user asked to follow them
    if os.path.islink(filename_full) and not Settings.follow_symlinks:
        return True
    # never try to optimize our own timestamp marker files
    if os.path.basename(filename_full) == timestamp.RECORD_FILENAME:
        return True
    if os.path.exists(filename_full):
        return False
    if Settings.verbose:
        print(filename_full, 'was not found.')
    return True
"""Handle things that are not optimizable files."""
# File types
if not Settings.follow_symlinks and os.path.islink(filename_full):
return True
if os.path.basename(filename_full) == timestamp.RECORD_FILENAME:
return True
if not os.path.exists(filename_full):
if Settings.verbose:
print(filename_full, 'was not found.')
return True
return False | [
"def",
"_is_skippable",
"(",
"filename_full",
")",
":",
"# File types",
"if",
"not",
"Settings",
".",
"follow_symlinks",
"and",
"os",
".",
"path",
".",
"islink",
"(",
"filename_full",
")",
":",
"return",
"True",
"if",
"os",
".",
"path",
".",
"basename",
"(... | Handle things that are not optimizable files. | [
"Handle",
"things",
"that",
"are",
"not",
"optimizable",
"files",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/walk.py#L48-L61 | train | 42,475 |
def walk_file(filename, walk_after, recurse=None, archive_mtime=None):
    """Optimize an individual file.

    Dispatches: directories go to walk_dir, comic archives to
    walk_comic_archive, and other images to the generic optimizer via
    the process pool.  Returns a set of multiprocessing async results.
    """
    filename = os.path.normpath(filename)
    result_set = set()
    if _is_skippable(filename):
        return result_set
    walk_after = timestamp.get_walk_after(filename, walk_after)
    # File is a directory
    if os.path.isdir(filename):
        return walk_dir(filename, walk_after, recurse, archive_mtime)
    # unchanged since the last recorded run: nothing to do
    if _is_older_than_timestamp(filename, walk_after, archive_mtime):
        return result_set
    # Check image format
    try:
        image_format = detect_format.detect_file(filename)
    except Exception:
        # record the detection failure as an error result instead of aborting
        res = Settings.pool.apply_async(stats.ReportStats,
                                        (filename,),
                                        {'error': "Detect Format"})
        result_set.add(res)
        image_format = False
    if not image_format:
        return result_set
    if Settings.list_only:
        # list only
        print("{}: {}".format(filename, image_format))
        return result_set
    if detect_format.is_format_selected(image_format, comic.FORMATS,
                                        comic.PROGRAMS):
        # comic archive
        result = walk_comic_archive(filename, image_format, walk_after)
    else:
        # regular image
        args = [filename, image_format, Settings]
        result = Settings.pool.apply_async(optimize.optimize_image,
                                           args=(args,))
    result_set.add(result)
    return result_set
"""Optimize an individual file."""
filename = os.path.normpath(filename)
result_set = set()
if _is_skippable(filename):
return result_set
walk_after = timestamp.get_walk_after(filename, walk_after)
# File is a directory
if os.path.isdir(filename):
return walk_dir(filename, walk_after, recurse, archive_mtime)
if _is_older_than_timestamp(filename, walk_after, archive_mtime):
return result_set
# Check image format
try:
image_format = detect_format.detect_file(filename)
except Exception:
res = Settings.pool.apply_async(stats.ReportStats,
(filename,),
{'error': "Detect Format"})
result_set.add(res)
image_format = False
if not image_format:
return result_set
if Settings.list_only:
# list only
print("{}: {}".format(filename, image_format))
return result_set
if detect_format.is_format_selected(image_format, comic.FORMATS,
comic.PROGRAMS):
# comic archive
result = walk_comic_archive(filename, image_format, walk_after)
else:
# regular image
args = [filename, image_format, Settings]
result = Settings.pool.apply_async(optimize.optimize_image,
args=(args,))
result_set.add(result)
return result_set | [
"def",
"walk_file",
"(",
"filename",
",",
"walk_after",
",",
"recurse",
"=",
"None",
",",
"archive_mtime",
"=",
"None",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"filename",
")",
"result_set",
"=",
"set",
"(",
")",
"if",
"_is_... | Optimize an individual file. | [
"Optimize",
"an",
"individual",
"file",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/walk.py#L78-L124 | train | 42,476 |
def walk_dir(dir_path, walk_after, recurse=None, archive_mtime=None):
    """Recursively optimize every file under *dir_path*.

    Returns the union of all async results from the files walked.
    """
    if recurse is None:
        recurse = Settings.recurse
    results = set()
    if not recurse:
        return results
    for root, _, filenames in os.walk(dir_path):
        for fname in filenames:
            fname_full = os.path.join(root, fname)
            try:
                results |= walk_file(fname_full, walk_after, recurse,
                                     archive_mtime)
            except Exception:
                print("Error with file: {}".format(fname_full))
                raise
    return results
"""Recursively optimize a directory."""
if recurse is None:
recurse = Settings.recurse
result_set = set()
if not recurse:
return result_set
for root, _, filenames in os.walk(dir_path):
for filename in filenames:
filename_full = os.path.join(root, filename)
try:
results = walk_file(filename_full, walk_after, recurse,
archive_mtime)
result_set = result_set.union(results)
except Exception:
print("Error with file: {}".format(filename_full))
raise
return result_set | [
"def",
"walk_dir",
"(",
"dir_path",
",",
"walk_after",
",",
"recurse",
"=",
"None",
",",
"archive_mtime",
"=",
"None",
")",
":",
"if",
"recurse",
"is",
"None",
":",
"recurse",
"=",
"Settings",
".",
"recurse",
"result_set",
"=",
"set",
"(",
")",
"if",
"... | Recursively optimize a directory. | [
"Recursively",
"optimize",
"a",
"directory",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/walk.py#L127-L147 | train | 42,477 |
def _walk_all_files():
    """
    Optimize every path from the Settings.paths argument list.

    Returns a tuple of (directories to record timestamps in, total
    bytes in, total bytes out, whether to nag about animated gifs,
    and a list of (filename, error) pairs).
    """
    # Init records
    record_dirs = set()
    result_set = set()
    for filename in Settings.paths:
        # Record dirs to put timestamps in later
        filename_full = os.path.abspath(filename)
        if Settings.recurse and os.path.isdir(filename_full):
            record_dirs.add(filename_full)
        walk_after = timestamp.get_walk_after(filename_full)
        results = walk_file(filename_full, walk_after, Settings.recurse)
        result_set = result_set.union(results)
    bytes_in = 0
    bytes_out = 0
    nag_about_gifs = False
    errors = []
    # block on every async result and aggregate the stats
    for result in result_set:
        res = result.get()
        if res.error:
            errors += [(res.final_filename, res.error)]
            continue
        bytes_in += res.bytes_in
        bytes_out += res.bytes_out
        nag_about_gifs = nag_about_gifs or res.nag_about_gifs
    return record_dirs, bytes_in, bytes_out, nag_about_gifs, errors
"""
Optimize the files from the arugments list in two batches.
One for absolute paths which are probably outside the current
working directory tree and one for relative files.
"""
# Init records
record_dirs = set()
result_set = set()
for filename in Settings.paths:
# Record dirs to put timestamps in later
filename_full = os.path.abspath(filename)
if Settings.recurse and os.path.isdir(filename_full):
record_dirs.add(filename_full)
walk_after = timestamp.get_walk_after(filename_full)
results = walk_file(filename_full, walk_after, Settings.recurse)
result_set = result_set.union(results)
bytes_in = 0
bytes_out = 0
nag_about_gifs = False
errors = []
for result in result_set:
res = result.get()
if res.error:
errors += [(res.final_filename, res.error)]
continue
bytes_in += res.bytes_in
bytes_out += res.bytes_out
nag_about_gifs = nag_about_gifs or res.nag_about_gifs
return record_dirs, bytes_in, bytes_out, nag_about_gifs, errors | [
"def",
"_walk_all_files",
"(",
")",
":",
"# Init records",
"record_dirs",
"=",
"set",
"(",
")",
"result_set",
"=",
"set",
"(",
")",
"for",
"filename",
"in",
"Settings",
".",
"paths",
":",
"# Record dirs to put timestamps in later",
"filename_full",
"=",
"os",
".... | Optimize the files from the arugments list in two batches.
One for absolute paths which are probably outside the current
working directory tree and one for relative files. | [
"Optimize",
"the",
"files",
"from",
"the",
"arugments",
"list",
"in",
"two",
"batches",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/walk.py#L150-L184 | train | 42,478 |
def run():
    """Use preconfigured settings to optimize files.

    Top-level driver: fans work out over a process pool, waits for it
    all to finish, records timestamps, then reports totals.
    """
    # Setup Multiprocessing
    # manager = multiprocessing.Manager()
    Settings.pool = multiprocessing.Pool(Settings.jobs)
    # Optimize Files
    record_dirs, bytes_in, bytes_out, nag_about_gifs, errors = \
        _walk_all_files()
    # Shut down multiprocessing; join waits for all workers to finish
    Settings.pool.close()
    Settings.pool.join()
    # Write timestamps only after every optimization has completed
    for filename in record_dirs:
        timestamp.record_timestamp(filename)
    # Finish by reporting totals
    stats.report_totals(bytes_in, bytes_out, nag_about_gifs, errors)
"""Use preconfigured settings to optimize files."""
# Setup Multiprocessing
# manager = multiprocessing.Manager()
Settings.pool = multiprocessing.Pool(Settings.jobs)
# Optimize Files
record_dirs, bytes_in, bytes_out, nag_about_gifs, errors = \
_walk_all_files()
# Shut down multiprocessing
Settings.pool.close()
Settings.pool.join()
# Write timestamps
for filename in record_dirs:
timestamp.record_timestamp(filename)
# Finish by reporting totals
stats.report_totals(bytes_in, bytes_out, nag_about_gifs, errors) | [
"def",
"run",
"(",
")",
":",
"# Setup Multiprocessing",
"# manager = multiprocessing.Manager()",
"Settings",
".",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"Settings",
".",
"jobs",
")",
"# Optimize Files",
"record_dirs",
",",
"bytes_in",
",",
"bytes_out",
",... | Use preconfigured settings to optimize files. | [
"Use",
"preconfigured",
"settings",
"to",
"optimize",
"files",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/walk.py#L187-L206 | train | 42,479 |
def replace_ext(filename, new_ext):
    """Return *filename* with its extension swapped for *new_ext*."""
    base = os.path.splitext(filename)[0]
    return '{}.{}'.format(base, new_ext)
"""Replace the file extention."""
filename_base = os.path.splitext(filename)[0]
new_filename = '{}.{}'.format(filename_base, new_ext)
return new_filename | [
"def",
"replace_ext",
"(",
"filename",
",",
"new_ext",
")",
":",
"filename_base",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"new_filename",
"=",
"'{}.{}'",
".",
"format",
"(",
"filename_base",
",",
"new_ext",
")",
"ret... | Replace the file extention. | [
"Replace",
"the",
"file",
"extention",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/files.py#L12-L16 | train | 42,480 |
def parse_reqs(filename):
    """Parse setup requirements from a requirements.txt file."""
    return [str(entry.req) for entry in
            parse_requirements(filename, session=False)]
"""Parse setup requirements from a requirements.txt file."""
install_reqs = parse_requirements(filename, session=False)
return [str(ir.req) for ir in install_reqs] | [
"def",
"parse_reqs",
"(",
"filename",
")",
":",
"install_reqs",
"=",
"parse_requirements",
"(",
"filename",
",",
"session",
"=",
"False",
")",
"return",
"[",
"str",
"(",
"ir",
".",
"req",
")",
"for",
"ir",
"in",
"install_reqs",
"]"
] | Parse setup requirements from a requirements.txt file. | [
"Parse",
"setup",
"requirements",
"from",
"a",
"requirements",
".",
"txt",
"file",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/setup.py#L43-L46 | train | 42,481 |
def get_req_list():
    """Return the requirement list, adding dev requirements for 'develop' builds."""
    reqs = parse_reqs(REQUIREMENTS['prod'])
    # setup.py is invoked as: python setup.py <cmd> [develop]
    if len(sys.argv) > 2 and sys.argv[2] == 'develop':
        reqs += parse_reqs(REQUIREMENTS['dev'])
    return reqs
"""Get the requirements by weather we're building develop or not."""
req_list = parse_reqs(REQUIREMENTS['prod'])
if len(sys.argv) > 2 and sys.argv[2] == ('develop'):
req_list += parse_reqs(REQUIREMENTS['dev'])
return req_list | [
"def",
"get_req_list",
"(",
")",
":",
"req_list",
"=",
"parse_reqs",
"(",
"REQUIREMENTS",
"[",
"'prod'",
"]",
")",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
">",
"2",
"and",
"sys",
".",
"argv",
"[",
"2",
"]",
"==",
"(",
"'develop'",
")",
":",
"r... | Get the requirements by weather we're building develop or not. | [
"Get",
"the",
"requirements",
"by",
"weather",
"we",
"re",
"building",
"develop",
"or",
"not",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/setup.py#L49-L54 | train | 42,482 |
def get_comic_format(filename):
    """Return the comic archive format of *filename*, or None if not one."""
    ext = os.path.splitext(filename)[-1].lower()
    if ext not in _COMIC_EXTS:
        return None
    # extension alone is not trusted; sniff the actual container type
    if zipfile.is_zipfile(filename):
        return _CBZ_FORMAT
    if rarfile.is_rarfile(filename):
        return _CBR_FORMAT
    return None
"""Return the comic format if it is a comic archive."""
image_format = None
filename_ext = os.path.splitext(filename)[-1].lower()
if filename_ext in _COMIC_EXTS:
if zipfile.is_zipfile(filename):
image_format = _CBZ_FORMAT
elif rarfile.is_rarfile(filename):
image_format = _CBR_FORMAT
return image_format | [
"def",
"get_comic_format",
"(",
"filename",
")",
":",
"image_format",
"=",
"None",
"filename_ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"-",
"1",
"]",
".",
"lower",
"(",
")",
"if",
"filename_ext",
"in",
"_COMIC_EXTS",
":",
... | Return the comic format if it is a comic archive. | [
"Return",
"the",
"comic",
"format",
"if",
"it",
"is",
"a",
"comic",
"archive",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/formats/comic.py#L43-L52 | train | 42,483 |
def _get_archive_tmp_dir(filename):
    """Return the working directory name used when expanding *filename*."""
    dirname, basename = os.path.split(filename)
    return os.path.join(dirname, _ARCHIVE_TMP_DIR_TEMPLATE.format(basename))
"""Get the name of the working dir to use for this filename."""
head, tail = os.path.split(filename)
return os.path.join(head, _ARCHIVE_TMP_DIR_TEMPLATE.format(tail)) | [
"def",
"_get_archive_tmp_dir",
"(",
"filename",
")",
":",
"head",
",",
"tail",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"head",
",",
"_ARCHIVE_TMP_DIR_TEMPLATE",
".",
"format",
"(",
"tail"... | Get the name of the working dir to use for this filename. | [
"Get",
"the",
"name",
"of",
"the",
"working",
"dir",
"to",
"use",
"for",
"this",
"filename",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/formats/comic.py#L55-L58 | train | 42,484 |
ajslater/picopt | picopt/formats/comic.py | comic_archive_uncompress | def comic_archive_uncompress(filename, image_format):
"""
Uncompress comic archives.
Return the name of the working directory we uncompressed into.
"""
if not Settings.comics:
report = ['Skipping archive file: {}'.format(filename)]
return None, ReportStats(filename, report=report)
if Settings.verbose:
truncated_filename = stats.truncate_cwd(filename)
print("Extracting {}...".format(truncated_filename), end='')
# create the tmpdir
tmp_dir = _get_archive_tmp_dir(filename)
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
# extract archvie into the tmpdir
if image_format == _CBZ_FORMAT:
with zipfile.ZipFile(filename, 'r') as zfile:
zfile.extractall(tmp_dir)
elif image_format == _CBR_FORMAT:
with rarfile.RarFile(filename, 'r') as rfile:
rfile.extractall(tmp_dir)
else:
report = '{} {} is not a good format'.format(filename, image_format)
return None, ReportStats(filename, report=report)
if Settings.verbose:
print('done')
return tmp_dir, None | python | def comic_archive_uncompress(filename, image_format):
"""
Uncompress comic archives.
Return the name of the working directory we uncompressed into.
"""
if not Settings.comics:
report = ['Skipping archive file: {}'.format(filename)]
return None, ReportStats(filename, report=report)
if Settings.verbose:
truncated_filename = stats.truncate_cwd(filename)
print("Extracting {}...".format(truncated_filename), end='')
# create the tmpdir
tmp_dir = _get_archive_tmp_dir(filename)
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
# extract archvie into the tmpdir
if image_format == _CBZ_FORMAT:
with zipfile.ZipFile(filename, 'r') as zfile:
zfile.extractall(tmp_dir)
elif image_format == _CBR_FORMAT:
with rarfile.RarFile(filename, 'r') as rfile:
rfile.extractall(tmp_dir)
else:
report = '{} {} is not a good format'.format(filename, image_format)
return None, ReportStats(filename, report=report)
if Settings.verbose:
print('done')
return tmp_dir, None | [
"def",
"comic_archive_uncompress",
"(",
"filename",
",",
"image_format",
")",
":",
"if",
"not",
"Settings",
".",
"comics",
":",
"report",
"=",
"[",
"'Skipping archive file: {}'",
".",
"format",
"(",
"filename",
")",
"]",
"return",
"None",
",",
"ReportStats",
"... | Uncompress comic archives.
Return the name of the working directory we uncompressed into. | [
"Uncompress",
"comic",
"archives",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/formats/comic.py#L61-L95 | train | 42,485 |
ajslater/picopt | picopt/formats/comic.py | _comic_archive_write_zipfile | def _comic_archive_write_zipfile(new_filename, tmp_dir):
"""Zip up the files in the tempdir into the new filename."""
if Settings.verbose:
print('Rezipping archive', end='')
with zipfile.ZipFile(new_filename, 'w',
compression=zipfile.ZIP_DEFLATED) as new_zf:
root_len = len(os.path.abspath(tmp_dir))
for r_d_f in os.walk(tmp_dir):
root = r_d_f[0]
filenames = r_d_f[2]
archive_root = os.path.abspath(root)[root_len:]
for fname in filenames:
fullpath = os.path.join(root, fname)
archive_name = os.path.join(archive_root, fname)
if Settings.verbose:
print('.', end='')
new_zf.write(fullpath, archive_name, zipfile.ZIP_DEFLATED) | python | def _comic_archive_write_zipfile(new_filename, tmp_dir):
"""Zip up the files in the tempdir into the new filename."""
if Settings.verbose:
print('Rezipping archive', end='')
with zipfile.ZipFile(new_filename, 'w',
compression=zipfile.ZIP_DEFLATED) as new_zf:
root_len = len(os.path.abspath(tmp_dir))
for r_d_f in os.walk(tmp_dir):
root = r_d_f[0]
filenames = r_d_f[2]
archive_root = os.path.abspath(root)[root_len:]
for fname in filenames:
fullpath = os.path.join(root, fname)
archive_name = os.path.join(archive_root, fname)
if Settings.verbose:
print('.', end='')
new_zf.write(fullpath, archive_name, zipfile.ZIP_DEFLATED) | [
"def",
"_comic_archive_write_zipfile",
"(",
"new_filename",
",",
"tmp_dir",
")",
":",
"if",
"Settings",
".",
"verbose",
":",
"print",
"(",
"'Rezipping archive'",
",",
"end",
"=",
"''",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"new_filename",
",",
"'w'",
... | Zip up the files in the tempdir into the new filename. | [
"Zip",
"up",
"the",
"files",
"in",
"the",
"tempdir",
"into",
"the",
"new",
"filename",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/formats/comic.py#L98-L114 | train | 42,486 |
ajslater/picopt | picopt/formats/comic.py | comic_archive_compress | def comic_archive_compress(args):
"""
Called back by every optimization inside a comic archive.
When they're all done it creates the new archive and cleans up.
"""
try:
filename, old_format, settings, nag_about_gifs = args
Settings.update(settings)
tmp_dir = _get_archive_tmp_dir(filename)
# archive into new filename
new_filename = files.replace_ext(filename, _NEW_ARCHIVE_SUFFIX)
_comic_archive_write_zipfile(new_filename, tmp_dir)
# Cleanup tmpdir
if os.path.isdir(tmp_dir):
if Settings.verbose:
print('.', end='')
shutil.rmtree(tmp_dir)
if Settings.verbose:
print('done.')
report_stats = files.cleanup_after_optimize(
filename, new_filename, old_format, _CBZ_FORMAT)
report_stats.nag_about_gifs = nag_about_gifs
stats.report_saved(report_stats)
return report_stats
except Exception as exc:
print(exc)
traceback.print_exc(exc)
raise exc | python | def comic_archive_compress(args):
"""
Called back by every optimization inside a comic archive.
When they're all done it creates the new archive and cleans up.
"""
try:
filename, old_format, settings, nag_about_gifs = args
Settings.update(settings)
tmp_dir = _get_archive_tmp_dir(filename)
# archive into new filename
new_filename = files.replace_ext(filename, _NEW_ARCHIVE_SUFFIX)
_comic_archive_write_zipfile(new_filename, tmp_dir)
# Cleanup tmpdir
if os.path.isdir(tmp_dir):
if Settings.verbose:
print('.', end='')
shutil.rmtree(tmp_dir)
if Settings.verbose:
print('done.')
report_stats = files.cleanup_after_optimize(
filename, new_filename, old_format, _CBZ_FORMAT)
report_stats.nag_about_gifs = nag_about_gifs
stats.report_saved(report_stats)
return report_stats
except Exception as exc:
print(exc)
traceback.print_exc(exc)
raise exc | [
"def",
"comic_archive_compress",
"(",
"args",
")",
":",
"try",
":",
"filename",
",",
"old_format",
",",
"settings",
",",
"nag_about_gifs",
"=",
"args",
"Settings",
".",
"update",
"(",
"settings",
")",
"tmp_dir",
"=",
"_get_archive_tmp_dir",
"(",
"filename",
")... | Called back by every optimization inside a comic archive.
When they're all done it creates the new archive and cleans up. | [
"Called",
"back",
"by",
"every",
"optimization",
"inside",
"a",
"comic",
"archive",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/formats/comic.py#L117-L149 | train | 42,487 |
ajslater/picopt | picopt/formats/jpeg.py | mozjpeg | def mozjpeg(ext_args):
"""Create argument list for mozjpeg."""
args = copy.copy(_MOZJPEG_ARGS)
if Settings.destroy_metadata:
args += ["-copy", "none"]
else:
args += ["-copy", "all"]
args += ['-outfile']
args += [ext_args.new_filename, ext_args.old_filename]
extern.run_ext(args)
return _JPEG_FORMAT | python | def mozjpeg(ext_args):
"""Create argument list for mozjpeg."""
args = copy.copy(_MOZJPEG_ARGS)
if Settings.destroy_metadata:
args += ["-copy", "none"]
else:
args += ["-copy", "all"]
args += ['-outfile']
args += [ext_args.new_filename, ext_args.old_filename]
extern.run_ext(args)
return _JPEG_FORMAT | [
"def",
"mozjpeg",
"(",
"ext_args",
")",
":",
"args",
"=",
"copy",
".",
"copy",
"(",
"_MOZJPEG_ARGS",
")",
"if",
"Settings",
".",
"destroy_metadata",
":",
"args",
"+=",
"[",
"\"-copy\"",
",",
"\"none\"",
"]",
"else",
":",
"args",
"+=",
"[",
"\"-copy\"",
... | Create argument list for mozjpeg. | [
"Create",
"argument",
"list",
"for",
"mozjpeg",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/formats/jpeg.py#L18-L28 | train | 42,488 |
ajslater/picopt | picopt/formats/jpeg.py | jpegtran | def jpegtran(ext_args):
"""Create argument list for jpegtran."""
args = copy.copy(_JPEGTRAN_ARGS)
if Settings.destroy_metadata:
args += ["-copy", "none"]
else:
args += ["-copy", "all"]
if Settings.jpegtran_prog:
args += ["-progressive"]
args += ['-outfile']
args += [ext_args.new_filename, ext_args.old_filename]
extern.run_ext(args)
return _JPEG_FORMAT | python | def jpegtran(ext_args):
"""Create argument list for jpegtran."""
args = copy.copy(_JPEGTRAN_ARGS)
if Settings.destroy_metadata:
args += ["-copy", "none"]
else:
args += ["-copy", "all"]
if Settings.jpegtran_prog:
args += ["-progressive"]
args += ['-outfile']
args += [ext_args.new_filename, ext_args.old_filename]
extern.run_ext(args)
return _JPEG_FORMAT | [
"def",
"jpegtran",
"(",
"ext_args",
")",
":",
"args",
"=",
"copy",
".",
"copy",
"(",
"_JPEGTRAN_ARGS",
")",
"if",
"Settings",
".",
"destroy_metadata",
":",
"args",
"+=",
"[",
"\"-copy\"",
",",
"\"none\"",
"]",
"else",
":",
"args",
"+=",
"[",
"\"-copy\"",... | Create argument list for jpegtran. | [
"Create",
"argument",
"list",
"for",
"jpegtran",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/formats/jpeg.py#L31-L43 | train | 42,489 |
ajslater/picopt | picopt/formats/jpeg.py | jpegrescan | def jpegrescan(ext_args):
"""Run the EXTERNAL program jpegrescan."""
args = copy.copy(_JPEGRESCAN_ARGS)
if Settings.jpegrescan_multithread:
args += ['-t']
if Settings.destroy_metadata:
args += ['-s']
args += [ext_args.old_filename, ext_args.new_filename]
extern.run_ext(args)
return _JPEG_FORMAT | python | def jpegrescan(ext_args):
"""Run the EXTERNAL program jpegrescan."""
args = copy.copy(_JPEGRESCAN_ARGS)
if Settings.jpegrescan_multithread:
args += ['-t']
if Settings.destroy_metadata:
args += ['-s']
args += [ext_args.old_filename, ext_args.new_filename]
extern.run_ext(args)
return _JPEG_FORMAT | [
"def",
"jpegrescan",
"(",
"ext_args",
")",
":",
"args",
"=",
"copy",
".",
"copy",
"(",
"_JPEGRESCAN_ARGS",
")",
"if",
"Settings",
".",
"jpegrescan_multithread",
":",
"args",
"+=",
"[",
"'-t'",
"]",
"if",
"Settings",
".",
"destroy_metadata",
":",
"args",
"+... | Run the EXTERNAL program jpegrescan. | [
"Run",
"the",
"EXTERNAL",
"program",
"jpegrescan",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/formats/jpeg.py#L46-L55 | train | 42,490 |
ajslater/picopt | picopt/cli.py | process_arguments | def process_arguments(arguments):
"""Recompute special cases for input arguments."""
Settings.update(arguments)
Settings.config_program_reqs(PROGRAMS)
Settings.verbose = arguments.verbose + 1
Settings.paths = set(arguments.paths)
if arguments.formats == DEFAULT_FORMATS:
Settings.formats = arguments.to_png_formats | \
jpeg.FORMATS | gif.FORMATS
else:
Settings.formats = set(
arguments.formats.upper().split(FORMAT_DELIMETER))
if arguments.comics:
Settings.formats = Settings.formats | comic.FORMATS
if arguments.optimize_after is not None:
try:
after_dt = dateutil.parser.parse(arguments.optimize_after)
arguments.optimize_after = time.mktime(after_dt.timetuple())
except Exception as ex:
print(ex)
print('Could not parse date to optimize after.')
exit(1)
if arguments.jobs < 1:
Settings.jobs = 1
# Make a rough guess about weather or not to invoke multithreding
# jpegrescan '-t' uses three threads
# one off multithread switch bcaseu this is the only one right now
files_in_paths = 0
non_file_in_paths = False
for filename in arguments.paths:
if os.path.isfile(filename):
files_in_paths += 1
else:
non_file_in_paths = True
Settings.jpegrescan_multithread = not non_file_in_paths and \
Settings.jobs - (files_in_paths*3) > -1
return arguments | python | def process_arguments(arguments):
"""Recompute special cases for input arguments."""
Settings.update(arguments)
Settings.config_program_reqs(PROGRAMS)
Settings.verbose = arguments.verbose + 1
Settings.paths = set(arguments.paths)
if arguments.formats == DEFAULT_FORMATS:
Settings.formats = arguments.to_png_formats | \
jpeg.FORMATS | gif.FORMATS
else:
Settings.formats = set(
arguments.formats.upper().split(FORMAT_DELIMETER))
if arguments.comics:
Settings.formats = Settings.formats | comic.FORMATS
if arguments.optimize_after is not None:
try:
after_dt = dateutil.parser.parse(arguments.optimize_after)
arguments.optimize_after = time.mktime(after_dt.timetuple())
except Exception as ex:
print(ex)
print('Could not parse date to optimize after.')
exit(1)
if arguments.jobs < 1:
Settings.jobs = 1
# Make a rough guess about weather or not to invoke multithreding
# jpegrescan '-t' uses three threads
# one off multithread switch bcaseu this is the only one right now
files_in_paths = 0
non_file_in_paths = False
for filename in arguments.paths:
if os.path.isfile(filename):
files_in_paths += 1
else:
non_file_in_paths = True
Settings.jpegrescan_multithread = not non_file_in_paths and \
Settings.jobs - (files_in_paths*3) > -1
return arguments | [
"def",
"process_arguments",
"(",
"arguments",
")",
":",
"Settings",
".",
"update",
"(",
"arguments",
")",
"Settings",
".",
"config_program_reqs",
"(",
"PROGRAMS",
")",
"Settings",
".",
"verbose",
"=",
"arguments",
".",
"verbose",
"+",
"1",
"Settings",
".",
"... | Recompute special cases for input arguments. | [
"Recompute",
"special",
"cases",
"for",
"input",
"arguments",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/cli.py#L126-L170 | train | 42,491 |
ajslater/picopt | picopt/cli.py | run | def run(args):
"""Process command line arguments and walk inputs."""
raw_arguments = get_arguments(args[1:])
process_arguments(raw_arguments)
walk.run()
return True | python | def run(args):
"""Process command line arguments and walk inputs."""
raw_arguments = get_arguments(args[1:])
process_arguments(raw_arguments)
walk.run()
return True | [
"def",
"run",
"(",
"args",
")",
":",
"raw_arguments",
"=",
"get_arguments",
"(",
"args",
"[",
"1",
":",
"]",
")",
"process_arguments",
"(",
"raw_arguments",
")",
"walk",
".",
"run",
"(",
")",
"return",
"True"
] | Process command line arguments and walk inputs. | [
"Process",
"command",
"line",
"arguments",
"and",
"walk",
"inputs",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/cli.py#L173-L178 | train | 42,492 |
ajslater/picopt | picopt/extern.py | does_external_program_run | def does_external_program_run(prog, verbose):
"""Test to see if the external programs can be run."""
try:
with open('/dev/null') as null:
subprocess.call([prog, '-h'], stdout=null, stderr=null)
result = True
except OSError:
if verbose > 1:
print("couldn't run {}".format(prog))
result = False
return result | python | def does_external_program_run(prog, verbose):
"""Test to see if the external programs can be run."""
try:
with open('/dev/null') as null:
subprocess.call([prog, '-h'], stdout=null, stderr=null)
result = True
except OSError:
if verbose > 1:
print("couldn't run {}".format(prog))
result = False
return result | [
"def",
"does_external_program_run",
"(",
"prog",
",",
"verbose",
")",
":",
"try",
":",
"with",
"open",
"(",
"'/dev/null'",
")",
"as",
"null",
":",
"subprocess",
".",
"call",
"(",
"[",
"prog",
",",
"'-h'",
"]",
",",
"stdout",
"=",
"null",
",",
"stderr",... | Test to see if the external programs can be run. | [
"Test",
"to",
"see",
"if",
"the",
"external",
"programs",
"can",
"be",
"run",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/extern.py#L16-L27 | train | 42,493 |
ajslater/picopt | picopt/extern.py | run_ext | def run_ext(args):
"""Run EXTERNAL program."""
try:
subprocess.check_call(args)
except subprocess.CalledProcessError as exc:
print(exc)
print(exc.cmd)
print(exc.returncode)
print(exc.output)
raise | python | def run_ext(args):
"""Run EXTERNAL program."""
try:
subprocess.check_call(args)
except subprocess.CalledProcessError as exc:
print(exc)
print(exc.cmd)
print(exc.returncode)
print(exc.output)
raise | [
"def",
"run_ext",
"(",
"args",
")",
":",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"args",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"exc",
":",
"print",
"(",
"exc",
")",
"print",
"(",
"exc",
".",
"cmd",
")",
"print",
"(",
... | Run EXTERNAL program. | [
"Run",
"EXTERNAL",
"program",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/extern.py#L30-L39 | train | 42,494 |
ajslater/picopt | picopt/stats.py | _humanize_bytes | def _humanize_bytes(num_bytes, precision=1):
"""
Return a humanized string representation of a number of num_bytes.
from:
http://code.activestate.com/recipes/
577081-humanized-representation-of-a-number-of-num_bytes/
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB'
"""
if num_bytes == 0:
return 'no bytes'
if num_bytes == 1:
return '1 byte'
factored_bytes = 0
factor_suffix = 'bytes'
for factor, suffix in ABBREVS:
if num_bytes >= factor:
factored_bytes = num_bytes / factor
factor_suffix = suffix
break
if factored_bytes == 1:
precision = 0
return '{:.{prec}f} {}'.format(factored_bytes, factor_suffix,
prec=precision) | python | def _humanize_bytes(num_bytes, precision=1):
"""
Return a humanized string representation of a number of num_bytes.
from:
http://code.activestate.com/recipes/
577081-humanized-representation-of-a-number-of-num_bytes/
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB'
"""
if num_bytes == 0:
return 'no bytes'
if num_bytes == 1:
return '1 byte'
factored_bytes = 0
factor_suffix = 'bytes'
for factor, suffix in ABBREVS:
if num_bytes >= factor:
factored_bytes = num_bytes / factor
factor_suffix = suffix
break
if factored_bytes == 1:
precision = 0
return '{:.{prec}f} {}'.format(factored_bytes, factor_suffix,
prec=precision) | [
"def",
"_humanize_bytes",
"(",
"num_bytes",
",",
"precision",
"=",
"1",
")",
":",
"if",
"num_bytes",
"==",
"0",
":",
"return",
"'no bytes'",
"if",
"num_bytes",
"==",
"1",
":",
"return",
"'1 byte'",
"factored_bytes",
"=",
"0",
"factor_suffix",
"=",
"'bytes'",... | Return a humanized string representation of a number of num_bytes.
from:
http://code.activestate.com/recipes/
577081-humanized-representation-of-a-number-of-num_bytes/
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB' | [
"Return",
"a",
"humanized",
"string",
"representation",
"of",
"a",
"number",
"of",
"num_bytes",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/stats.py#L46-L90 | train | 42,495 |
ajslater/picopt | picopt/stats.py | new_percent_saved | def new_percent_saved(report_stats):
"""Spit out how much space the optimization saved."""
size_in = report_stats.bytes_in
if size_in > 0:
size_out = report_stats.bytes_out
ratio = size_out / size_in
kb_saved = _humanize_bytes(size_in - size_out)
else:
ratio = 0
kb_saved = 0
percent_saved = (1 - ratio) * 100
result = '{:.{prec}f}% ({})'.format(percent_saved, kb_saved, prec=2)
return result | python | def new_percent_saved(report_stats):
"""Spit out how much space the optimization saved."""
size_in = report_stats.bytes_in
if size_in > 0:
size_out = report_stats.bytes_out
ratio = size_out / size_in
kb_saved = _humanize_bytes(size_in - size_out)
else:
ratio = 0
kb_saved = 0
percent_saved = (1 - ratio) * 100
result = '{:.{prec}f}% ({})'.format(percent_saved, kb_saved, prec=2)
return result | [
"def",
"new_percent_saved",
"(",
"report_stats",
")",
":",
"size_in",
"=",
"report_stats",
".",
"bytes_in",
"if",
"size_in",
">",
"0",
":",
"size_out",
"=",
"report_stats",
".",
"bytes_out",
"ratio",
"=",
"size_out",
"/",
"size_in",
"kb_saved",
"=",
"_humanize... | Spit out how much space the optimization saved. | [
"Spit",
"out",
"how",
"much",
"space",
"the",
"optimization",
"saved",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/stats.py#L93-L106 | train | 42,496 |
ajslater/picopt | picopt/stats.py | truncate_cwd | def truncate_cwd(full_filename):
"""Remove the cwd from the full filename."""
if full_filename.startswith(os.getcwd()):
truncated_filename = full_filename.split(os.getcwd(), 1)[1]
truncated_filename = truncated_filename.split(os.sep, 1)[1]
else:
truncated_filename = full_filename
return truncated_filename | python | def truncate_cwd(full_filename):
"""Remove the cwd from the full filename."""
if full_filename.startswith(os.getcwd()):
truncated_filename = full_filename.split(os.getcwd(), 1)[1]
truncated_filename = truncated_filename.split(os.sep, 1)[1]
else:
truncated_filename = full_filename
return truncated_filename | [
"def",
"truncate_cwd",
"(",
"full_filename",
")",
":",
"if",
"full_filename",
".",
"startswith",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
":",
"truncated_filename",
"=",
"full_filename",
".",
"split",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"1",
")",
... | Remove the cwd from the full filename. | [
"Remove",
"the",
"cwd",
"from",
"the",
"full",
"filename",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/stats.py#L109-L116 | train | 42,497 |
ajslater/picopt | picopt/stats.py | report_saved | def report_saved(report_stats):
"""Record the percent saved & print it."""
if Settings.verbose:
report = ''
truncated_filename = truncate_cwd(report_stats.final_filename)
report += '{}: '.format(truncated_filename)
total = new_percent_saved(report_stats)
if total:
report += total
else:
report += '0%'
if Settings.test:
report += ' could be saved.'
if Settings.verbose > 1:
tools_report = ', '.join(report_stats.report_list)
if tools_report:
report += '\n\t' + tools_report
print(report) | python | def report_saved(report_stats):
"""Record the percent saved & print it."""
if Settings.verbose:
report = ''
truncated_filename = truncate_cwd(report_stats.final_filename)
report += '{}: '.format(truncated_filename)
total = new_percent_saved(report_stats)
if total:
report += total
else:
report += '0%'
if Settings.test:
report += ' could be saved.'
if Settings.verbose > 1:
tools_report = ', '.join(report_stats.report_list)
if tools_report:
report += '\n\t' + tools_report
print(report) | [
"def",
"report_saved",
"(",
"report_stats",
")",
":",
"if",
"Settings",
".",
"verbose",
":",
"report",
"=",
"''",
"truncated_filename",
"=",
"truncate_cwd",
"(",
"report_stats",
".",
"final_filename",
")",
"report",
"+=",
"'{}: '",
".",
"format",
"(",
"truncat... | Record the percent saved & print it. | [
"Record",
"the",
"percent",
"saved",
"&",
"print",
"it",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/stats.py#L119-L137 | train | 42,498 |
ajslater/picopt | picopt/stats.py | report_totals | def report_totals(bytes_in, bytes_out, nag_about_gifs, errors):
"""Report the total number and percent of bytes saved."""
if bytes_in:
bytes_saved = bytes_in - bytes_out
percent_bytes_saved = bytes_saved / bytes_in * 100
msg = ''
if Settings.test:
if percent_bytes_saved > 0:
msg += "Could save"
elif percent_bytes_saved == 0:
msg += "Could even out for"
else:
msg += "Could lose"
else:
if percent_bytes_saved > 0:
msg += "Saved"
elif percent_bytes_saved == 0:
msg += "Evened out"
else:
msg = "Lost"
msg += " a total of {} or {:.{prec}f}%".format(
_humanize_bytes(bytes_saved), percent_bytes_saved, prec=2)
if Settings.verbose:
print(msg)
if Settings.test:
print("Test run did not change any files.")
else:
if Settings.verbose:
print("Didn't optimize any files.")
if nag_about_gifs and Settings.verbose:
print("Most animated GIFS would be better off converted to"
" HTML5 video")
if not errors:
return
print("Errors with the following files:")
for error in errors:
print("{}: {}".format(error[0], error[1])) | python | def report_totals(bytes_in, bytes_out, nag_about_gifs, errors):
"""Report the total number and percent of bytes saved."""
if bytes_in:
bytes_saved = bytes_in - bytes_out
percent_bytes_saved = bytes_saved / bytes_in * 100
msg = ''
if Settings.test:
if percent_bytes_saved > 0:
msg += "Could save"
elif percent_bytes_saved == 0:
msg += "Could even out for"
else:
msg += "Could lose"
else:
if percent_bytes_saved > 0:
msg += "Saved"
elif percent_bytes_saved == 0:
msg += "Evened out"
else:
msg = "Lost"
msg += " a total of {} or {:.{prec}f}%".format(
_humanize_bytes(bytes_saved), percent_bytes_saved, prec=2)
if Settings.verbose:
print(msg)
if Settings.test:
print("Test run did not change any files.")
else:
if Settings.verbose:
print("Didn't optimize any files.")
if nag_about_gifs and Settings.verbose:
print("Most animated GIFS would be better off converted to"
" HTML5 video")
if not errors:
return
print("Errors with the following files:")
for error in errors:
print("{}: {}".format(error[0], error[1])) | [
"def",
"report_totals",
"(",
"bytes_in",
",",
"bytes_out",
",",
"nag_about_gifs",
",",
"errors",
")",
":",
"if",
"bytes_in",
":",
"bytes_saved",
"=",
"bytes_in",
"-",
"bytes_out",
"percent_bytes_saved",
"=",
"bytes_saved",
"/",
"bytes_in",
"*",
"100",
"msg",
"... | Report the total number and percent of bytes saved. | [
"Report",
"the",
"total",
"number",
"and",
"percent",
"of",
"bytes",
"saved",
"."
] | 261da837027563c1dc3ed07b70e1086520a60402 | https://github.com/ajslater/picopt/blob/261da837027563c1dc3ed07b70e1086520a60402/picopt/stats.py#L140-L180 | train | 42,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.