Dataset schema (one record per function; the ranges are the per-column bounds
reported for this split):

repo: string (7 to 55 chars)
path: string (4 to 223 chars)
func_name: string (1 to 134 chars)
original_string: string (75 to 104k chars)
language: 1 class (value: python)
code: string (75 to 104k chars, same text as original_string)
code_tokens: list (19 to 28.4k items)
docstring: string (1 to 46.9k chars)
docstring_tokens: list (1 to 1.97k items)
sha: string (40 chars)
url: string (87 to 315 chars)
partition: 1 class (value: train)

repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: zip_dir
language: python
code:

    def zip_dir(directory):
        """zip a directory tree into a BytesIO object"""
        result = io.BytesIO()
        dlen = len(directory)
        with ZipFile(result, "w") as zf:
            for root, dirs, files in os.walk(directory):
                for name in files:
                    full = os.path.join(root, name)
                    rel = root[dlen:]
                    dest = os.path.join(rel, name)
                    zf.write(full, dest)
        return result
docstring:
zip a directory tree into a BytesIO object
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L1249-L1260
partition: train

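Below is a minimal usage sketch for zip_dir. It assumes distlib is installed so the same function can be imported as distlib.util.zip_dir; the directory contents are illustrative:

    import os
    import tempfile
    from zipfile import ZipFile
    from distlib.util import zip_dir  # assumes distlib is installed

    with tempfile.TemporaryDirectory() as d:
        with open(os.path.join(d, "hello.txt"), "w") as f:
            f.write("hi")
        buf = zip_dir(d)          # io.BytesIO holding the archive bytes
        buf.seek(0)               # rewind before reading
        with ZipFile(buf) as zf:
            print(zf.namelist())  # ['hello.txt']

Entries are stored relative to the directory argument, so members do not carry the absolute source path.
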
repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: iglob
language: python
code:

    def iglob(path_glob):
        """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
        if _CHECK_RECURSIVE_GLOB.search(path_glob):
            msg = """invalid glob %r: recursive glob "**" must be used alone"""
            raise ValueError(msg % path_glob)
        if _CHECK_MISMATCH_SET.search(path_glob):
            msg = """invalid glob %r: mismatching set marker '{' or '}'"""
            raise ValueError(msg % path_glob)
        return _iglob(path_glob)
docstring:
Extended globbing function that supports ** and {opt1,opt2,opt3}.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L1367-L1375
partition: train

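A short sketch of the extended glob syntax accepted above, assuming distlib is installed; the patterns are illustrative:

    from distlib.util import iglob  # assumes distlib is installed

    # "**" must stand alone as a path segment; {opt1,opt2} expands alternatives.
    for path in iglob('src/**/*.py'):
        print(path)

    # Malformed patterns raise ValueError before any filesystem access:
    # iglob('src**/*.py')     -> recursive glob "**" must be used alone
    # iglob('src/{a,b/*.py')  -> mismatching set marker '{' or '}'
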
repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: FileOperator.newer
language: python
code:

    def newer(self, source, target):
        """Tell if the target is newer than the source.
        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.
        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.
        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True
        return os.stat(source).st_mtime > os.stat(target).st_mtime
docstring:
Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L493-L511
partition: train

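A small sketch of the mtime comparison, assuming distlib is installed; the file names are illustrative:

    import os
    import tempfile
    from distlib.util import FileOperator  # assumes distlib is installed

    fo = FileOperator()
    src = tempfile.NamedTemporaryFile(delete=False)
    src.close()
    print(fo.newer(src.name, src.name + '.out'))  # True: target missing
    # fo.newer('/no/such/file', src.name) raises DistlibException instead
    os.remove(src.name)
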
repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: FileOperator.copy_file
language: python
code:

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)
docstring:
Copy a file respecting dry-run and force flags.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L513-L528
partition: train

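A dry-run sketch, assuming distlib is installed; the paths are illustrative, and nothing is written when dry_run is set:

    from distlib.util import FileOperator  # assumes distlib is installed

    fo = FileOperator(dry_run=True)
    # Logs 'Copying ...' but performs no filesystem writes: the overwrite
    # checks and the shutil.copyfile call are skipped under dry_run.
    fo.copy_file('/tmp/in.txt', '/tmp/out.txt')
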
repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: FileOperator.commit
language: python
code:

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result
docstring:
Commit recorded changes, turn off recording, return
changes.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L633-L641
partition: train

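A record/commit sketch, assuming distlib is installed; the copied paths are illustrative and must exist for the copy to succeed:

    from distlib.util import FileOperator  # assumes distlib is installed

    fo = FileOperator()
    fo.record = True                           # start recording writes
    fo.copy_file('/tmp/in.txt', '/tmp/out.txt')
    files_written, dirs_created = fo.commit()  # returns and resets the record
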
repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: Cache.clear
language: python
code:

    def clear(self):
        """
        Clear the cache.
        """
        not_removed = []
        for fn in os.listdir(self.base):
            fn = os.path.join(self.base, fn)
            try:
                if os.path.islink(fn) or os.path.isfile(fn):
                    os.remove(fn)
                elif os.path.isdir(fn):
                    shutil.rmtree(fn)
            except Exception:
                not_removed.append(fn)
        return not_removed
docstring:
Clear the cache.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L964-L978
partition: train

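A sketch, assuming distlib is installed and that a Cache can be pointed at a scratch directory through its base argument:

    import tempfile
    from distlib.util import Cache  # assumes distlib is installed

    cache = Cache(tempfile.mkdtemp())  # base directory (assumed constructor arg)
    leftovers = cache.clear()          # entries that could not be removed
    print(leftovers)                   # [] when everything was deleted
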
repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: EventMixin.add
language: python
code:

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.
        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        subs = self._subscribers
        if event not in subs:
            subs[event] = deque([subscriber])
        else:
            sq = subs[event]
            if append:
                sq.append(subscriber)
            else:
                sq.appendleft(subscriber)
docstring:
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L988-L1006
partition: train

repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: EventMixin.remove
language: python
code:

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.
        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        """
        subs = self._subscribers
        if event not in subs:
            raise ValueError('No subscribers: %r' % event)
        subs[event].remove(subscriber)
docstring:
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L1008-L1018
partition: train

repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: EventMixin.publish
language: python
code:

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.
        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        result = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                logger.exception('Exception during event publication')
                value = None
            result.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, result)
        return result
docstring:
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L1027-L1048
partition: train

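The three EventMixin methods above compose as follows; the event name and handler are illustrative, and the mixin is used directly as a base class:

    from distlib.util import EventMixin  # assumes distlib is installed

    class Source(EventMixin):
        pass

    def on_ping(event, who=None):
        return 'pong for %s' % who

    src = Source()
    src.add('ping', on_ping)
    print(src.publish('ping', who='alice'))  # ['pong for alice']
    src.remove('ping', on_ping)

A subscriber that raises is logged and contributes None to the result list, so one faulty handler cannot break publication for the others.
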
repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: Configurator.inc_convert
language: python
code:

    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
docstring:
Default converter for the inc:// protocol.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L1698-L1704
partition: train

repo: pypa/pipenv
path: pipenv/vendor/distlib/util.py
func_name: SubprocessMixin.reader
language: python
code:

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            s = stream.readline()
            if not s:
                break
            if progress is not None:
                progress(s, context)
            else:
                if not verbose:
                    sys.stderr.write('.')
                else:
                    sys.stderr.write(s.decode('utf-8'))
                sys.stderr.flush()
        stream.close()
docstring:
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L1715-L1734
partition: train

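A sketch that feeds a real subprocess stream through reader, assuming distlib is installed; with verbose set, each decoded line is echoed to sys.stderr:

    import subprocess
    import sys
    from distlib.util import SubprocessMixin  # assumes distlib is installed

    sm = SubprocessMixin(verbose=True)
    p = subprocess.Popen([sys.executable, '-c', "print('hello')"],
                         stdout=subprocess.PIPE)
    sm.reader(p.stdout, 'stdout')  # blocks until EOF, then closes the stream
    p.wait()
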
repo: pypa/pipenv
path: pipenv/vendor/yarg/parse.py
func_name: _get
language: python
code:

    def _get(pypi_server):
        """
        Query the PyPI RSS feed and return a list
        of XML items.
        """
        response = requests.get(pypi_server)
        if response.status_code >= 300:
            raise HTTPError(status_code=response.status_code,
                            reason=response.reason)
        if hasattr(response.content, 'decode'):
            tree = xml.etree.ElementTree.fromstring(response.content.decode())
        else:
            tree = xml.etree.ElementTree.fromstring(response.content)
        channel = tree.find('channel')
        return channel.findall('item')
docstring:
Query the PyPI RSS feed and return a list
of XML items.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/yarg/parse.py#L33-L47
partition: train

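_get needs a live PyPI endpoint, but its parsing step can be shown offline with a small RSS document; the XML below is illustrative:

    import xml.etree.ElementTree

    rss = ('<rss><channel>'
           '<item><title>yarg 0.1.9</title></item>'
           '</channel></rss>')
    tree = xml.etree.ElementTree.fromstring(rss)
    items = tree.find('channel').findall('item')
    print(items[0][0].text)  # 'yarg 0.1.9'
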
repo: pypa/pipenv
path: pipenv/vendor/yarg/parse.py
func_name: newest_packages
language: python
code:

    def newest_packages(
            pypi_server="https://pypi.python.org/pypi?%3Aaction=packages_rss"):
        """
        Constructs a request to the PyPI server and returns a list of
        :class:`yarg.parse.Package`.
        :param pypi_server: (option) URL to the PyPI server.
        >>> import yarg
        >>> yarg.newest_packages()
        [<Package yarg>, <Package gray>, <Package ragy>]
        """
        items = _get(pypi_server)
        i = []
        for item in items:
            i_dict = {'name': item[0].text.split()[0],
                      'url': item[1].text,
                      'description': item[3].text,
                      'date': item[4].text}
            i.append(Package(i_dict))
        return i
docstring:
Constructs a request to the PyPI server and returns a list of
:class:`yarg.parse.Package`.
:param pypi_server: (option) URL to the PyPI server.
>>> import yarg
>>> yarg.newest_packages()
[<Package yarg>, <Package gray>, <Package ragy>]
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/yarg/parse.py#L50-L70
partition: train

repo: pypa/pipenv
path: pipenv/vendor/passa/models/lockers.py
func_name: _get_requirements
language: python
code:

    def _get_requirements(model, section_name):
        """Produce a mapping of identifier: requirement from the section.
        """
        if not model:
            return {}
        return {identify_requirment(r): r for r in (
            requirementslib.Requirement.from_pipfile(name, package._data)
            for name, package in model.get(section_name, {}).items()
        )}
docstring:
Produce a mapping of identifier: requirement from the section.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/passa/models/lockers.py#L22-L30
partition: train

repo: pypa/pipenv
path: pipenv/vendor/passa/models/lockers.py
func_name: _collect_derived_entries
language: python
code:

    def _collect_derived_entries(state, traces, identifiers):
        """Produce a mapping containing all candidates derived from `identifiers`.
        `identifiers` should provide a collection of requirement identifications
        from a section (i.e. `packages` or `dev-packages`). This function uses
        `trace` to filter out candidates in the state that are present because of
        an entry in that collection.
        """
        identifiers = set(identifiers)
        if not identifiers:
            return {}
        entries = {}
        extras = {}
        for identifier, requirement in state.mapping.items():
            routes = {trace[1] for trace in traces[identifier] if len(trace) > 1}
            if identifier not in identifiers and not (identifiers & routes):
                continue
            name = requirement.normalized_name
            if requirement.extras:
                # Aggregate extras from multiple routes so we can produce their
                # union in the lock file. (sarugaku/passa#24)
                try:
                    extras[name].extend(requirement.extras)
                except KeyError:
                    extras[name] = list(requirement.extras)
            entries[name] = next(iter(requirement.as_pipfile().values()))
        for name, ext in extras.items():
            entries[name]["extras"] = ext
        return entries
docstring:
Produce a mapping containing all candidates derived from `identifiers`.
`identifiers` should provide a collection of requirement identifications
from a section (i.e. `packages` or `dev-packages`). This function uses
`trace` to filter out candidates in the state that are present because of
an entry in that collection.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/passa/models/lockers.py#L48-L78
partition: train

repo: pypa/pipenv
path: pipenv/vendor/passa/models/lockers.py
func_name: AbstractLocker.lock
language: python
code:

    def lock(self):
        """Lock specified (abstract) requirements into (concrete) candidates.
        The locking procedure consists of four stages:
        * Resolve versions and dependency graph (powered by ResolveLib).
        * Walk the graph to determine "why" each candidate came to be, i.e.
          what top-level requirements result in a given candidate.
        * Populate hashes for resolved candidates.
        * Populate markers based on dependency specifications of each
          candidate, and the dependency graph.
        """
        provider = self.get_provider()
        reporter = self.get_reporter()
        resolver = resolvelib.Resolver(provider, reporter)
        with vistir.cd(self.project.root):
            state = resolver.resolve(self.requirements)
        traces = trace_graph(state.graph)
        hash_cache = HashCache()
        for r in state.mapping.values():
            if not r.hashes:
                r.hashes = get_hashes(hash_cache, r)
        set_metadata(
            state.mapping, traces,
            provider.fetched_dependencies,
            provider.collected_requires_pythons,
        )
        lockfile = plette.Lockfile.with_meta_from(self.project.pipfile)
        lockfile["default"] = _collect_derived_entries(
            state, traces, self.default_requirements,
        )
        lockfile["develop"] = _collect_derived_entries(
            state, traces, self.develop_requirements,
        )
        self.project.lockfile = lockfile
docstring:
Lock specified (abstract) requirements into (concrete) candidates.
The locking procedure consists of four stages:
* Resolve versions and dependency graph (powered by ResolveLib).
* Walk the graph to determine "why" each candidate came to be, i.e.
what top-level requirements result in a given candidate.
* Populate hashes for resolved candidates.
* Populate markers based on dependency specifications of each
candidate, and the dependency graph.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/passa/models/lockers.py#L124-L163
partition: train

repo: pypa/pipenv
path: pipenv/patched/notpip/_internal/utils/logging.py
func_name: setup_logging
language: python
code:

    def setup_logging(verbosity, no_color, user_log_file):
        """Configures and sets up all of the logging
        Returns the requested logging level, as its integer value.
        """
        # Determine the level to be logging at.
        if verbosity >= 1:
            level = "DEBUG"
        elif verbosity == -1:
            level = "WARNING"
        elif verbosity == -2:
            level = "ERROR"
        elif verbosity <= -3:
            level = "CRITICAL"
        else:
            level = "INFO"

        level_number = getattr(logging, level)

        # The "root" logger should match the "console" level *unless* we also need
        # to log to a user log file.
        include_user_log = user_log_file is not None
        if include_user_log:
            additional_log_file = user_log_file
            root_level = "DEBUG"
        else:
            additional_log_file = "/dev/null"
            root_level = level

        # Disable any logging besides WARNING unless we have DEBUG level logging
        # enabled for vendored libraries.
        vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"

        # Shorthands for clarity
        log_streams = {
            "stdout": "ext://sys.stdout",
            "stderr": "ext://sys.stderr",
        }
        handler_classes = {
            "stream": "pip._internal.utils.logging.ColorizedStreamHandler",
            "file": "pip._internal.utils.logging.BetterRotatingFileHandler",
        }

        logging.config.dictConfig({
            "version": 1,
            "disable_existing_loggers": False,
            "filters": {
                "exclude_warnings": {
                    "()": "pip._internal.utils.logging.MaxLevelFilter",
                    "level": logging.WARNING,
                },
            },
            "formatters": {
                "indent": {
                    "()": IndentingFormatter,
                    "format": "%(message)s",
                },
                "indent_with_timestamp": {
                    "()": IndentingFormatter,
                    "format": "%(message)s",
                    "add_timestamp": True,
                },
            },
            "handlers": {
                "console": {
                    "level": level,
                    "class": handler_classes["stream"],
                    "no_color": no_color,
                    "stream": log_streams["stdout"],
                    "filters": ["exclude_warnings"],
                    "formatter": "indent",
                },
                "console_errors": {
                    "level": "WARNING",
                    "class": handler_classes["stream"],
                    "no_color": no_color,
                    "stream": log_streams["stderr"],
                    "formatter": "indent",
                },
                "user_log": {
                    "level": "DEBUG",
                    "class": handler_classes["file"],
                    "filename": additional_log_file,
                    "delay": True,
                    "formatter": "indent_with_timestamp",
                },
            },
            "root": {
                "level": root_level,
                "handlers": ["console", "console_errors"] + (
                    ["user_log"] if include_user_log else []
                ),
            },
            "loggers": {
                "pip._vendor": {
                    "level": vendored_log_level
                }
            },
        })

        return level_number
docstring:
Configures and sets up all of the logging
Returns the requested logging level, as its integer value.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/utils/logging.py#L217-L318
partition: train

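The stdout/stderr split configured above (records below WARNING to stdout, WARNING and up to stderr) can be reproduced with only the standard library. MaxLevelFilter below is a stand-in written for this sketch, not an import from pip:

    import logging
    import logging.config

    class MaxLevelFilter(logging.Filter):
        # stand-in for pip._internal.utils.logging.MaxLevelFilter
        def __init__(self, level):
            self.level = level

        def filter(self, record):
            return record.levelno < self.level

    logging.config.dictConfig({
        'version': 1,
        'filters': {
            'exclude_warnings': {'()': MaxLevelFilter,
                                 'level': logging.WARNING},
        },
        'handlers': {
            'console': {'class': 'logging.StreamHandler',
                        'stream': 'ext://sys.stdout',
                        'filters': ['exclude_warnings']},
            'console_errors': {'class': 'logging.StreamHandler',
                               'level': 'WARNING',
                               'stream': 'ext://sys.stderr'},
        },
        'root': {'level': 'INFO',
                 'handlers': ['console', 'console_errors']},
    })
    logging.info('reaches stdout only')
    logging.warning('reaches stderr only')
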
repo: pypa/pipenv
path: pipenv/patched/notpip/_internal/utils/logging.py
func_name: IndentingFormatter.format
language: python
code:

    def format(self, record):
        """
        Calls the standard formatter, but will indent all of the log messages
        by our current indentation level.
        """
        formatted = super(IndentingFormatter, self).format(record)
        prefix = ''
        if self.add_timestamp:
            prefix = self.formatTime(record, "%Y-%m-%dT%H:%M:%S ")
        prefix += " " * get_indentation()
        formatted = "".join([
            prefix + line
            for line in formatted.splitlines(True)
        ])
        return formatted
docstring:
Calls the standard formatter, but will indent all of the log messages
by our current indentation level.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/utils/logging.py#L103-L117
partition: train

repo: pypa/pipenv
path: pipenv/patched/notpip/_internal/utils/logging.py
func_name: ColorizedStreamHandler._using_stdout
language: python
code:

    def _using_stdout(self):
        """
        Return whether the handler is using sys.stdout.
        """
        if WINDOWS and colorama:
            # Then self.stream is an AnsiToWin32 object.
            return self.stream.wrapped is sys.stdout
        return self.stream is sys.stdout
docstring:
Return whether the handler is using sys.stdout.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/utils/logging.py#L145-L153
partition: train

repo: pypa/pipenv
path: pipenv/vendor/dotenv/environ.py
func_name: _cast_boolean
language: python
code:

    def _cast_boolean(value):
        """
        Helper to convert config values to boolean as ConfigParser do.
        """
        _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
                     '0': False, 'no': False, 'false': False, 'off': False, '': False}
        value = str(value)
        if value.lower() not in _BOOLEANS:
            raise ValueError('Not a boolean: %s' % value)
        return _BOOLEANS[value.lower()]
docstring:
Helper to convert config values to boolean as ConfigParser do.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/dotenv/environ.py#L17-L27
partition: train

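_cast_boolean is module-private, so its mapping is restated standalone here to show the accepted spellings; the sample values are illustrative:

    _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
                 '0': False, 'no': False, 'false': False, 'off': False,
                 '': False}

    for raw in ('YES', 'Off', ''):
        print(repr(raw), '->', _BOOLEANS[str(raw).lower()])
    # 'maybe' is not in the mapping, so _cast_boolean raises ValueError
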
repo: pypa/pipenv
path: pipenv/vendor/dotenv/environ.py
func_name: getenv
language: python
code:

    def getenv(option, default=undefined, cast=undefined):
        """
        Return the value for option or default if defined.
        """
        # We can't avoid __contains__ because value may be empty.
        if option in os.environ:
            value = os.environ[option]
        else:
            if isinstance(default, Undefined):
                raise UndefinedValueError('{} not found. Declare it as envvar or define a default value.'.format(option))
            value = default
        if isinstance(cast, Undefined):
            return value
        if cast is bool:
            value = _cast_boolean(value)
        elif cast is list:
            value = [x for x in value.split(',') if x]
        else:
            value = cast(value)
        return value
docstring:
Return the value for option or default if defined.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/dotenv/environ.py#L30-L54
partition: train

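A sketch of the cast behavior, assuming this vendored module is importable as dotenv.environ (the layout is specific to pipenv's vendored copy of dotenv):

    import os
    from dotenv.environ import getenv  # assumed vendored module path

    os.environ['DEBUG'] = 'yes'
    os.environ['HOSTS'] = 'a,b,,c'
    print(getenv('DEBUG', cast=bool))                # True, via _cast_boolean
    print(getenv('HOSTS', cast=list))                # ['a', 'b', 'c']
    print(getenv('PORT', default='8000', cast=int))  # 8000
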
repo: pypa/pipenv
path: pipenv/vendor/click/formatting.py
func_name: join_options
language: python
code:

    def join_options(options):
        """Given a list of option strings this joins them in the most appropriate
        way and returns them in the form ``(formatted_string,
        any_prefix_is_slash)`` where the second item in the tuple is a flag that
        indicates if any of the option prefixes was a slash.
        """
        rv = []
        any_prefix_is_slash = False
        for opt in options:
            prefix = split_opt(opt)[0]
            if prefix == '/':
                any_prefix_is_slash = True
            rv.append((len(prefix), opt))
        rv.sort(key=lambda x: x[0])
        rv = ', '.join(x[1] for x in rv)
        return rv, any_prefix_is_slash
docstring:
Given a list of option strings this joins them in the most appropriate
way and returns them in the form ``(formatted_string,
any_prefix_is_slash)`` where the second item in the tuple is a flag that
indicates if any of the option prefixes was a slash.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/formatting.py#L239-L256
partition: train

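join_options sorts options by prefix length, so short options come first; assuming click is installed, the same helper is importable from click.formatting:

    from click.formatting import join_options  # assumes click is installed

    print(join_options(['--verbose', '-v']))
    # ('-v, --verbose', False)
    print(join_options(['/debug']))
    # ('/debug', True)  -- the slash prefix sets any_prefix_is_slash
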
repo: pypa/pipenv
path: pipenv/vendor/click/formatting.py
func_name: HelpFormatter.write_usage
language: python
code:

    def write_usage(self, prog, args='', prefix='Usage: '):
        """Writes a usage line into the buffer.
        :param prog: the program name.
        :param args: whitespace separated list of arguments.
        :param prefix: the prefix for the first line.
        """
        usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
        text_width = self.width - self.current_indent
        if text_width >= (term_len(usage_prefix) + 20):
            # The arguments will fit to the right of the prefix.
            indent = ' ' * term_len(usage_prefix)
            self.write(wrap_text(args, text_width,
                                 initial_indent=usage_prefix,
                                 subsequent_indent=indent))
        else:
            # The prefix is too long, put the arguments on the next line.
            self.write(usage_prefix)
            self.write('\n')
            indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
            self.write(wrap_text(args, text_width,
                                 initial_indent=indent,
                                 subsequent_indent=indent))
        self.write('\n')
docstring:
Writes a usage line into the buffer.
:param prog: the program name.
:param args: whitespace separated list of arguments.
:param prefix: the prefix for the first line.
sha: cae8d76c210b9777e90aab76e9c4b0e53bb19cde
url: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/formatting.py#L125-L150
partition: train

repo: pypa/pipenv
path: pipenv/vendor/click/formatting.py
func_name: HelpFormatter.write_text
language: python
code:

    def write_text(self, text):
        """Writes re-indented text into the buffer. This rewraps and
        preserves paragraphs.
        """
        text_width = max(self.width - self.current_indent, 11)
        indent = ' ' * self.current_indent
        self.write(wrap_text(text, text_width,
                             initial_indent=indent,
                             subsequent_indent=indent,
                             preserve_paragraphs=True))
        self.write('\n')
|
python
|
def write_text(self, text):
"""Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
"""
text_width = max(self.width - self.current_indent, 11)
indent = ' ' * self.current_indent
self.write(wrap_text(text, text_width,
initial_indent=indent,
subsequent_indent=indent,
preserve_paragraphs=True))
self.write('\n')
|
[
"def",
"write_text",
"(",
"self",
",",
"text",
")",
":",
"text_width",
"=",
"max",
"(",
"self",
".",
"width",
"-",
"self",
".",
"current_indent",
",",
"11",
")",
"indent",
"=",
"' '",
"*",
"self",
".",
"current_indent",
"self",
".",
"write",
"(",
"wrap_text",
"(",
"text",
",",
"text_width",
",",
"initial_indent",
"=",
"indent",
",",
"subsequent_indent",
"=",
"indent",
",",
"preserve_paragraphs",
"=",
"True",
")",
")",
"self",
".",
"write",
"(",
"'\\n'",
")"
] |
Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
|
[
"Writes",
"re",
"-",
"indented",
"text",
"into",
"the",
"buffer",
".",
"This",
"rewraps",
"and",
"preserves",
"paragraphs",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/formatting.py#L161-L171
|
train
|
pypa/pipenv
|
pipenv/vendor/click/formatting.py
|
HelpFormatter.write_dl
|
def write_dl(self, rows, col_max=30, col_spacing=2):
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
"""
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError('Expected two columns for definition list')
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write('%*s%s' % (self.current_indent, '', first))
if not second:
self.write('\n')
continue
if term_len(first) <= first_col - col_spacing:
self.write(' ' * (first_col - term_len(first)))
else:
self.write('\n')
self.write(' ' * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
lines = iter(wrap_text(second, text_width).splitlines())
if lines:
self.write(next(lines) + '\n')
for line in lines:
self.write('%*s%s\n' % (
first_col + self.current_indent, '', line))
else:
self.write('\n')
|
python
|
def write_dl(self, rows, col_max=30, col_spacing=2):
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
"""
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError('Expected two columns for definition list')
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write('%*s%s' % (self.current_indent, '', first))
if not second:
self.write('\n')
continue
if term_len(first) <= first_col - col_spacing:
self.write(' ' * (first_col - term_len(first)))
else:
self.write('\n')
self.write(' ' * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
lines = iter(wrap_text(second, text_width).splitlines())
if lines:
self.write(next(lines) + '\n')
for line in lines:
self.write('%*s%s\n' % (
first_col + self.current_indent, '', line))
else:
self.write('\n')
|
[
"def",
"write_dl",
"(",
"self",
",",
"rows",
",",
"col_max",
"=",
"30",
",",
"col_spacing",
"=",
"2",
")",
":",
"rows",
"=",
"list",
"(",
"rows",
")",
"widths",
"=",
"measure_table",
"(",
"rows",
")",
"if",
"len",
"(",
"widths",
")",
"!=",
"2",
":",
"raise",
"TypeError",
"(",
"'Expected two columns for definition list'",
")",
"first_col",
"=",
"min",
"(",
"widths",
"[",
"0",
"]",
",",
"col_max",
")",
"+",
"col_spacing",
"for",
"first",
",",
"second",
"in",
"iter_rows",
"(",
"rows",
",",
"len",
"(",
"widths",
")",
")",
":",
"self",
".",
"write",
"(",
"'%*s%s'",
"%",
"(",
"self",
".",
"current_indent",
",",
"''",
",",
"first",
")",
")",
"if",
"not",
"second",
":",
"self",
".",
"write",
"(",
"'\\n'",
")",
"continue",
"if",
"term_len",
"(",
"first",
")",
"<=",
"first_col",
"-",
"col_spacing",
":",
"self",
".",
"write",
"(",
"' '",
"*",
"(",
"first_col",
"-",
"term_len",
"(",
"first",
")",
")",
")",
"else",
":",
"self",
".",
"write",
"(",
"'\\n'",
")",
"self",
".",
"write",
"(",
"' '",
"*",
"(",
"first_col",
"+",
"self",
".",
"current_indent",
")",
")",
"text_width",
"=",
"max",
"(",
"self",
".",
"width",
"-",
"first_col",
"-",
"2",
",",
"10",
")",
"lines",
"=",
"iter",
"(",
"wrap_text",
"(",
"second",
",",
"text_width",
")",
".",
"splitlines",
"(",
")",
")",
"if",
"lines",
":",
"self",
".",
"write",
"(",
"next",
"(",
"lines",
")",
"+",
"'\\n'",
")",
"for",
"line",
"in",
"lines",
":",
"self",
".",
"write",
"(",
"'%*s%s\\n'",
"%",
"(",
"first_col",
"+",
"self",
".",
"current_indent",
",",
"''",
",",
"line",
")",
")",
"else",
":",
"self",
".",
"write",
"(",
"'\\n'",
")"
] |
Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
|
[
"Writes",
"a",
"definition",
"list",
"into",
"the",
"buffer",
".",
"This",
"is",
"how",
"options",
"and",
"commands",
"are",
"usually",
"formatted",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/formatting.py#L173-L208
|
train
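A minimal sketch, assuming click is importable: the second row's help text is long enough to wrap, which shows the second-column alignment that write_dl derives from col_max and col_spacing.

from click.formatting import HelpFormatter

formatter = HelpFormatter(width=50)
formatter.write_dl([
    ('-v, --verbose', 'Enable chatty output.'),
    ('--config PATH', 'Help text long enough to wrap; continuation '
                      'lines stay aligned under the second column.'),
])
print(formatter.getvalue())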
|
pypa/pipenv
|
pipenv/vendor/click/formatting.py
|
HelpFormatter.section
|
def section(self, name):
"""Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
"""
self.write_paragraph()
self.write_heading(name)
self.indent()
try:
yield
finally:
self.dedent()
|
python
|
def section(self, name):
"""Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
"""
self.write_paragraph()
self.write_heading(name)
self.indent()
try:
yield
finally:
self.dedent()
|
[
"def",
"section",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"write_paragraph",
"(",
")",
"self",
".",
"write_heading",
"(",
"name",
")",
"self",
".",
"indent",
"(",
")",
"try",
":",
"yield",
"finally",
":",
"self",
".",
"dedent",
"(",
")"
] |
Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
|
[
"Helpful",
"context",
"manager",
"that",
"writes",
"a",
"paragraph",
"a",
"heading",
"and",
"the",
"indents",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/formatting.py#L211-L223
|
train
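A minimal sketch, assuming click is importable: everything written inside the with-block is indented, and the try/finally above guarantees the dedent even if the body raises.

from click.formatting import HelpFormatter

formatter = HelpFormatter()
with formatter.section('Options'):
    formatter.write_text('Indented body text for the Options section.')
print(formatter.getvalue())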
|
pypa/pipenv
|
pipenv/patched/notpip/_internal/cli/parser.py
|
invalid_config_error_message
|
def invalid_config_error_message(action, key, val):
"""Returns a better error message when invalid configuration option
is provided."""
if action in ('store_true', 'store_false'):
return ("{0} is not a valid value for {1} option, "
"please specify a boolean value like yes/no, "
"true/false or 1/0 instead.").format(val, key)
return ("{0} is not a valid value for {1} option, "
"please specify a numerical value like 1/0 "
"instead.").format(val, key)
|
python
|
def invalid_config_error_message(action, key, val):
"""Returns a better error message when invalid configuration option
is provided."""
if action in ('store_true', 'store_false'):
return ("{0} is not a valid value for {1} option, "
"please specify a boolean value like yes/no, "
"true/false or 1/0 instead.").format(val, key)
return ("{0} is not a valid value for {1} option, "
"please specify a numerical value like 1/0 "
"instead.").format(val, key)
|
[
"def",
"invalid_config_error_message",
"(",
"action",
",",
"key",
",",
"val",
")",
":",
"if",
"action",
"in",
"(",
"'store_true'",
",",
"'store_false'",
")",
":",
"return",
"(",
"\"{0} is not a valid value for {1} option, \"",
"\"please specify a boolean value like yes/no, \"",
"\"true/false or 1/0 instead.\"",
")",
".",
"format",
"(",
"val",
",",
"key",
")",
"return",
"(",
"\"{0} is not a valid value for {1} option, \"",
"\"please specify a numerical value like 1/0 \"",
"\"instead.\"",
")",
".",
"format",
"(",
"val",
",",
"key",
")"
] |
Returns a better error message when an invalid configuration option
is provided.
|
[
"Returns",
"a",
"better",
"error",
"message",
"when",
"invalid",
"configuration",
"option",
"is",
"provided",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/parser.py#L251-L261
|
train
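A standalone copy of the helper above (redefined here so the sketch runs on its own), exercised on both branches; the option names and values are made up.

def invalid_config_error_message(action, key, val):
    if action in ('store_true', 'store_false'):
        return ("{0} is not a valid value for {1} option, "
                "please specify a boolean value like yes/no, "
                "true/false or 1/0 instead.").format(val, key)
    return ("{0} is not a valid value for {1} option, "
            "please specify a numerical value like 1/0 "
            "instead.").format(val, key)

print(invalid_config_error_message('store_true', 'no-cache-dir', 'maybe'))
print(invalid_config_error_message('count', 'verbose', 'lots'))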
|
pypa/pipenv
|
pipenv/patched/notpip/_internal/cli/parser.py
|
PrettyHelpFormatter._format_option_strings
|
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
|
python
|
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
|
[
"def",
"_format_option_strings",
"(",
"self",
",",
"option",
",",
"mvarfmt",
"=",
"' <%s>'",
",",
"optsep",
"=",
"', '",
")",
":",
"opts",
"=",
"[",
"]",
"if",
"option",
".",
"_short_opts",
":",
"opts",
".",
"append",
"(",
"option",
".",
"_short_opts",
"[",
"0",
"]",
")",
"if",
"option",
".",
"_long_opts",
":",
"opts",
".",
"append",
"(",
"option",
".",
"_long_opts",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"opts",
")",
">",
"1",
":",
"opts",
".",
"insert",
"(",
"1",
",",
"optsep",
")",
"if",
"option",
".",
"takes_value",
"(",
")",
":",
"metavar",
"=",
"option",
".",
"metavar",
"or",
"option",
".",
"dest",
".",
"lower",
"(",
")",
"opts",
".",
"append",
"(",
"mvarfmt",
"%",
"metavar",
".",
"lower",
"(",
")",
")",
"return",
"''",
".",
"join",
"(",
"opts",
")"
] |
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
|
[
"Return",
"a",
"comma",
"-",
"separated",
"list",
"of",
"option",
"strings",
"and",
"metavars",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/parser.py#L32-L53
|
train
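A self-contained rework of the same logic against a plain optparse.Option, since the method above lives on pip's help formatter and is not usable in isolation; the -f/--format option is a made-up example.

import optparse

def format_option_strings(option, mvarfmt=' <%s>', optsep=', '):
    # Same algorithm as the formatter method above.
    opts = []
    if option._short_opts:
        opts.append(option._short_opts[0])
    if option._long_opts:
        opts.append(option._long_opts[0])
    if len(opts) > 1:
        opts.insert(1, optsep)
    if option.takes_value():
        metavar = option.metavar or option.dest.lower()
        opts.append(mvarfmt % metavar.lower())
    return ''.join(opts)

opt = optparse.Option('-f', '--format', dest='format', metavar='FMT')
print(format_option_strings(opt))  # -> "-f, --format <fmt>"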
|
pypa/pipenv
|
pipenv/patched/notpip/_internal/cli/parser.py
|
PrettyHelpFormatter.format_usage
|
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
|
python
|
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
|
[
"def",
"format_usage",
"(",
"self",
",",
"usage",
")",
":",
"msg",
"=",
"'\\nUsage: %s\\n'",
"%",
"self",
".",
"indent_lines",
"(",
"textwrap",
".",
"dedent",
"(",
"usage",
")",
",",
"\" \"",
")",
"return",
"msg"
] |
Ensure there is only one newline between usage and the first heading
if there is no description.
|
[
"Ensure",
"there",
"is",
"only",
"one",
"newline",
"between",
"usage",
"and",
"the",
"first",
"heading",
"if",
"there",
"is",
"no",
"description",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/parser.py#L60-L66
|
train
|
pypa/pipenv
|
pipenv/patched/notpip/_internal/cli/parser.py
|
CustomOptionParser.insert_option_group
|
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
|
python
|
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
|
[
"def",
"insert_option_group",
"(",
"self",
",",
"idx",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"group",
"=",
"self",
".",
"add_option_group",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"option_groups",
".",
"pop",
"(",
")",
"self",
".",
"option_groups",
".",
"insert",
"(",
"idx",
",",
"group",
")",
"return",
"group"
] |
Insert an OptionGroup at a given position.
|
[
"Insert",
"an",
"OptionGroup",
"at",
"a",
"given",
"position",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/parser.py#L113-L120
|
train
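The same add/pop/insert dance, sketched on a plain optparse.OptionParser to show why it yields positional insertion: add_option_group always appends, so the new group is popped off the end and re-inserted at the requested index.

import optparse

parser = optparse.OptionParser()
parser.add_option_group(optparse.OptionGroup(parser, 'Second'))
group = parser.add_option_group(optparse.OptionGroup(parser, 'First'))
parser.option_groups.pop()            # undo the implicit append
parser.option_groups.insert(0, group)
print([g.title for g in parser.option_groups])  # ['First', 'Second']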
|
pypa/pipenv
|
pipenv/patched/notpip/_internal/cli/parser.py
|
CustomOptionParser.option_list_all
|
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
|
python
|
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
|
[
"def",
"option_list_all",
"(",
"self",
")",
":",
"res",
"=",
"self",
".",
"option_list",
"[",
":",
"]",
"for",
"i",
"in",
"self",
".",
"option_groups",
":",
"res",
".",
"extend",
"(",
"i",
".",
"option_list",
")",
"return",
"res"
] |
Get a list of all options, including those in option groups.
|
[
"Get",
"a",
"list",
"of",
"all",
"options",
"including",
"those",
"in",
"option",
"groups",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/parser.py#L123-L129
|
train
|
pypa/pipenv
|
pipenv/patched/notpip/_internal/cli/parser.py
|
ConfigOptionParser._update_defaults
|
def _update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in self._get_ordered_configuration_items():
# '--' because configuration supports only long names
option = self.get_option('--' + key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
if option.action in ('store_true', 'store_false', 'count'):
try:
val = strtobool(val)
except ValueError:
error_msg = invalid_config_error_message(
option.action, key, val
)
self.error(error_msg)
elif option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == 'callback':
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
|
python
|
def _update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in self._get_ordered_configuration_items():
# '--' because configuration supports only long names
option = self.get_option('--' + key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
if option.action in ('store_true', 'store_false', 'count'):
try:
val = strtobool(val)
except ValueError:
error_msg = invalid_config_error_message(
option.action, key, val
)
self.error(error_msg)
elif option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == 'callback':
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
|
[
"def",
"_update_defaults",
"(",
"self",
",",
"defaults",
")",
":",
"# Accumulate complex default state.",
"self",
".",
"values",
"=",
"optparse",
".",
"Values",
"(",
"self",
".",
"defaults",
")",
"late_eval",
"=",
"set",
"(",
")",
"# Then set the options with those values",
"for",
"key",
",",
"val",
"in",
"self",
".",
"_get_ordered_configuration_items",
"(",
")",
":",
"# '--' because configuration supports only long names",
"option",
"=",
"self",
".",
"get_option",
"(",
"'--'",
"+",
"key",
")",
"# Ignore options not present in this parser. E.g. non-globals put",
"# in [global] by users that want them to apply to all applicable",
"# commands.",
"if",
"option",
"is",
"None",
":",
"continue",
"if",
"option",
".",
"action",
"in",
"(",
"'store_true'",
",",
"'store_false'",
",",
"'count'",
")",
":",
"try",
":",
"val",
"=",
"strtobool",
"(",
"val",
")",
"except",
"ValueError",
":",
"error_msg",
"=",
"invalid_config_error_message",
"(",
"option",
".",
"action",
",",
"key",
",",
"val",
")",
"self",
".",
"error",
"(",
"error_msg",
")",
"elif",
"option",
".",
"action",
"==",
"'append'",
":",
"val",
"=",
"val",
".",
"split",
"(",
")",
"val",
"=",
"[",
"self",
".",
"check_default",
"(",
"option",
",",
"key",
",",
"v",
")",
"for",
"v",
"in",
"val",
"]",
"elif",
"option",
".",
"action",
"==",
"'callback'",
":",
"late_eval",
".",
"add",
"(",
"option",
".",
"dest",
")",
"opt_str",
"=",
"option",
".",
"get_opt_string",
"(",
")",
"val",
"=",
"option",
".",
"convert_value",
"(",
"opt_str",
",",
"val",
")",
"# From take_action",
"args",
"=",
"option",
".",
"callback_args",
"or",
"(",
")",
"kwargs",
"=",
"option",
".",
"callback_kwargs",
"or",
"{",
"}",
"option",
".",
"callback",
"(",
"option",
",",
"opt_str",
",",
"val",
",",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"val",
"=",
"self",
".",
"check_default",
"(",
"option",
",",
"key",
",",
"val",
")",
"defaults",
"[",
"option",
".",
"dest",
"]",
"=",
"val",
"for",
"key",
"in",
"late_eval",
":",
"defaults",
"[",
"key",
"]",
"=",
"getattr",
"(",
"self",
".",
"values",
",",
"key",
")",
"self",
".",
"values",
"=",
"None",
"return",
"defaults"
] |
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
|
[
"Updates",
"the",
"given",
"defaults",
"with",
"values",
"from",
"the",
"config",
"files",
"and",
"the",
"environ",
".",
"Does",
"a",
"little",
"special",
"handling",
"for",
"certain",
"types",
"of",
"options",
"(",
"lists",
")",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/parser.py#L176-L223
|
train
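A small sketch of the boolean branch above, assuming distutils is still importable (strtobool went away with distutils in newer Pythons): recognized strings map to 0/1, anything else raises ValueError, which the parser turns into the error message shown earlier.

from distutils.util import strtobool

for raw in ('yes', 'false', '1'):
    print(raw, '->', strtobool(raw))
try:
    strtobool('maybe')
except ValueError:
    print('maybe -> ValueError; parser.error() would fire here')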
|
pypa/pipenv
|
pipenv/patched/notpip/_internal/cli/parser.py
|
ConfigOptionParser.get_default_values
|
def get_default_values(self):
"""Overriding to make updating the defaults after instantiation of
    the option parser possible; _update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
# Load the configuration, or error out in case of an error
try:
self.config.load()
except ConfigurationError as err:
self.exit(UNKNOWN_ERROR, str(err))
defaults = self._update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
|
python
|
def get_default_values(self):
"""Overriding to make updating the defaults after instantiation of
    the option parser possible; _update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
# Load the configuration, or error out in case of an error
try:
self.config.load()
except ConfigurationError as err:
self.exit(UNKNOWN_ERROR, str(err))
defaults = self._update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
|
[
"def",
"get_default_values",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"process_default_values",
":",
"# Old, pre-Optik 1.5 behaviour.",
"return",
"optparse",
".",
"Values",
"(",
"self",
".",
"defaults",
")",
"# Load the configuration, or error out in case of an error",
"try",
":",
"self",
".",
"config",
".",
"load",
"(",
")",
"except",
"ConfigurationError",
"as",
"err",
":",
"self",
".",
"exit",
"(",
"UNKNOWN_ERROR",
",",
"str",
"(",
"err",
")",
")",
"defaults",
"=",
"self",
".",
"_update_defaults",
"(",
"self",
".",
"defaults",
".",
"copy",
"(",
")",
")",
"# ours",
"for",
"option",
"in",
"self",
".",
"_get_all_options",
"(",
")",
":",
"default",
"=",
"defaults",
".",
"get",
"(",
"option",
".",
"dest",
")",
"if",
"isinstance",
"(",
"default",
",",
"string_types",
")",
":",
"opt_str",
"=",
"option",
".",
"get_opt_string",
"(",
")",
"defaults",
"[",
"option",
".",
"dest",
"]",
"=",
"option",
".",
"check_value",
"(",
"opt_str",
",",
"default",
")",
"return",
"optparse",
".",
"Values",
"(",
"defaults",
")"
] |
Overriding to make updating the defaults after instantiation of
the option parser possible; _update_defaults() does the dirty work.
|
[
"Overriding",
"to",
"make",
"updating",
"the",
"defaults",
"after",
"instantiation",
"of",
"the",
"option",
"parser",
"possible",
"_update_defaults",
"()",
"does",
"the",
"dirty",
"work",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/parser.py#L225-L244
|
train
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.safe_import
|
def safe_import(self, name):
"""Helper utility for reimporting previously imported modules while inside the env"""
module = None
if name not in self._modules:
self._modules[name] = importlib.import_module(name)
module = self._modules[name]
if not module:
dist = next(iter(
dist for dist in self.base_working_set if dist.project_name == name
), None)
if dist:
dist.activate()
module = importlib.import_module(name)
if name in sys.modules:
try:
six.moves.reload_module(module)
six.moves.reload_module(sys.modules[name])
except TypeError:
del sys.modules[name]
sys.modules[name] = self._modules[name]
return module
|
python
|
def safe_import(self, name):
"""Helper utility for reimporting previously imported modules while inside the env"""
module = None
if name not in self._modules:
self._modules[name] = importlib.import_module(name)
module = self._modules[name]
if not module:
dist = next(iter(
dist for dist in self.base_working_set if dist.project_name == name
), None)
if dist:
dist.activate()
module = importlib.import_module(name)
if name in sys.modules:
try:
six.moves.reload_module(module)
six.moves.reload_module(sys.modules[name])
except TypeError:
del sys.modules[name]
sys.modules[name] = self._modules[name]
return module
|
[
"def",
"safe_import",
"(",
"self",
",",
"name",
")",
":",
"module",
"=",
"None",
"if",
"name",
"not",
"in",
"self",
".",
"_modules",
":",
"self",
".",
"_modules",
"[",
"name",
"]",
"=",
"importlib",
".",
"import_module",
"(",
"name",
")",
"module",
"=",
"self",
".",
"_modules",
"[",
"name",
"]",
"if",
"not",
"module",
":",
"dist",
"=",
"next",
"(",
"iter",
"(",
"dist",
"for",
"dist",
"in",
"self",
".",
"base_working_set",
"if",
"dist",
".",
"project_name",
"==",
"name",
")",
",",
"None",
")",
"if",
"dist",
":",
"dist",
".",
"activate",
"(",
")",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"name",
")",
"if",
"name",
"in",
"sys",
".",
"modules",
":",
"try",
":",
"six",
".",
"moves",
".",
"reload_module",
"(",
"module",
")",
"six",
".",
"moves",
".",
"reload_module",
"(",
"sys",
".",
"modules",
"[",
"name",
"]",
")",
"except",
"TypeError",
":",
"del",
"sys",
".",
"modules",
"[",
"name",
"]",
"sys",
".",
"modules",
"[",
"name",
"]",
"=",
"self",
".",
"_modules",
"[",
"name",
"]",
"return",
"module"
] |
Helper utility for reimporting previously imported modules while inside the env
|
[
"Helper",
"utility",
"for",
"reimporting",
"previously",
"imported",
"modules",
"while",
"inside",
"the",
"env"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L51-L71
|
train
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.resolve_dist
|
def resolve_dist(cls, dist, working_set):
"""Given a local distribution and a working set, returns all dependencies from the set.
:param dist: A single distribution to find the dependencies of
:type dist: :class:`pkg_resources.Distribution`
:param working_set: A working set to search for all packages
:type working_set: :class:`pkg_resources.WorkingSet`
:return: A set of distributions which the package depends on, including the package
:rtype: set(:class:`pkg_resources.Distribution`)
"""
deps = set()
deps.add(dist)
try:
reqs = dist.requires()
except (AttributeError, OSError, IOError): # The METADATA file can't be found
return deps
for req in reqs:
dist = working_set.find(req)
deps |= cls.resolve_dist(dist, working_set)
return deps
|
python
|
def resolve_dist(cls, dist, working_set):
"""Given a local distribution and a working set, returns all dependencies from the set.
:param dist: A single distribution to find the dependencies of
:type dist: :class:`pkg_resources.Distribution`
:param working_set: A working set to search for all packages
:type working_set: :class:`pkg_resources.WorkingSet`
:return: A set of distributions which the package depends on, including the package
:rtype: set(:class:`pkg_resources.Distribution`)
"""
deps = set()
deps.add(dist)
try:
reqs = dist.requires()
except (AttributeError, OSError, IOError): # The METADATA file can't be found
return deps
for req in reqs:
dist = working_set.find(req)
deps |= cls.resolve_dist(dist, working_set)
return deps
|
[
"def",
"resolve_dist",
"(",
"cls",
",",
"dist",
",",
"working_set",
")",
":",
"deps",
"=",
"set",
"(",
")",
"deps",
".",
"add",
"(",
"dist",
")",
"try",
":",
"reqs",
"=",
"dist",
".",
"requires",
"(",
")",
"except",
"(",
"AttributeError",
",",
"OSError",
",",
"IOError",
")",
":",
"# The METADATA file can't be found",
"return",
"deps",
"for",
"req",
"in",
"reqs",
":",
"dist",
"=",
"working_set",
".",
"find",
"(",
"req",
")",
"deps",
"|=",
"cls",
".",
"resolve_dist",
"(",
"dist",
",",
"working_set",
")",
"return",
"deps"
] |
Given a local distribution and a working set, returns all dependencies from the set.
:param dist: A single distribution to find the dependencies of
:type dist: :class:`pkg_resources.Distribution`
:param working_set: A working set to search for all packages
:type working_set: :class:`pkg_resources.WorkingSet`
:return: A set of distributions which the package depends on, including the package
:rtype: set(:class:`pkg_resources.Distribution`)
|
[
"Given",
"a",
"local",
"distribution",
"and",
"a",
"working",
"set",
"returns",
"all",
"dependencies",
"from",
"the",
"set",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L74-L94
|
train
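A standalone copy of the recursion, run against the current interpreter's working set and assuming setuptools is installed. WorkingSet.find() can return None for an unsatisfied requirement, so this copy adds a guard before recursing.

import pkg_resources

def resolve_dist(dist, working_set):
    deps = {dist}
    try:
        reqs = dist.requires()
    except (AttributeError, OSError, IOError):
        return deps
    for req in reqs:
        found = working_set.find(req)
        if found is not None:  # guard added for this sketch
            deps |= resolve_dist(found, working_set)
    return deps

dist = pkg_resources.get_distribution('setuptools')
print(sorted(d.project_name for d in resolve_dist(dist, pkg_resources.working_set)))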
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.base_paths
|
def base_paths(self):
"""
Returns the context appropriate paths for the environment.
:return: A dictionary of environment specific paths to be used for installation operations
:rtype: dict
.. note:: The implementation of this is borrowed from a combination of pip and
virtualenv and is likely to change at some point in the future.
>>> from pipenv.core import project
>>> from pipenv.environment import Environment
>>> env = Environment(prefix=project.virtualenv_location, is_venv=True, sources=project.sources)
>>> import pprint
>>> pprint.pprint(env.base_paths)
{'PATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin::/bin:/usr/bin',
'PYTHONPATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'data': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'include': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'libdir': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platinclude': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'platlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platstdlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7',
'prefix': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'purelib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'scripts': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin',
'stdlib': '/home/hawk/.pyenv/versions/3.7.1/lib/python3.7'}
"""
prefix = make_posix(self.prefix.as_posix())
install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix'
paths = get_paths(install_scheme, vars={
'base': prefix,
'platbase': prefix,
})
paths["PATH"] = paths["scripts"] + os.pathsep + os.defpath
if "prefix" not in paths:
paths["prefix"] = prefix
purelib = make_posix(get_python_lib(plat_specific=0, prefix=prefix))
platlib = make_posix(get_python_lib(plat_specific=1, prefix=prefix))
if purelib == platlib:
lib_dirs = purelib
else:
lib_dirs = purelib + os.pathsep + platlib
paths["libdir"] = purelib
paths["purelib"] = purelib
paths["platlib"] = platlib
paths['PYTHONPATH'] = os.pathsep.join(["", ".", lib_dirs])
paths["libdirs"] = lib_dirs
return paths
|
python
|
def base_paths(self):
"""
Returns the context appropriate paths for the environment.
:return: A dictionary of environment specific paths to be used for installation operations
:rtype: dict
.. note:: The implementation of this is borrowed from a combination of pip and
virtualenv and is likely to change at some point in the future.
>>> from pipenv.core import project
>>> from pipenv.environment import Environment
>>> env = Environment(prefix=project.virtualenv_location, is_venv=True, sources=project.sources)
>>> import pprint
>>> pprint.pprint(env.base_paths)
{'PATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin::/bin:/usr/bin',
'PYTHONPATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'data': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'include': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'libdir': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platinclude': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'platlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platstdlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7',
'prefix': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'purelib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'scripts': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin',
'stdlib': '/home/hawk/.pyenv/versions/3.7.1/lib/python3.7'}
"""
prefix = make_posix(self.prefix.as_posix())
install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix'
paths = get_paths(install_scheme, vars={
'base': prefix,
'platbase': prefix,
})
paths["PATH"] = paths["scripts"] + os.pathsep + os.defpath
if "prefix" not in paths:
paths["prefix"] = prefix
purelib = make_posix(get_python_lib(plat_specific=0, prefix=prefix))
platlib = make_posix(get_python_lib(plat_specific=1, prefix=prefix))
if purelib == platlib:
lib_dirs = purelib
else:
lib_dirs = purelib + os.pathsep + platlib
paths["libdir"] = purelib
paths["purelib"] = purelib
paths["platlib"] = platlib
paths['PYTHONPATH'] = os.pathsep.join(["", ".", lib_dirs])
paths["libdirs"] = lib_dirs
return paths
|
[
"def",
"base_paths",
"(",
"self",
")",
":",
"prefix",
"=",
"make_posix",
"(",
"self",
".",
"prefix",
".",
"as_posix",
"(",
")",
")",
"install_scheme",
"=",
"'nt'",
"if",
"(",
"os",
".",
"name",
"==",
"'nt'",
")",
"else",
"'posix_prefix'",
"paths",
"=",
"get_paths",
"(",
"install_scheme",
",",
"vars",
"=",
"{",
"'base'",
":",
"prefix",
",",
"'platbase'",
":",
"prefix",
",",
"}",
")",
"paths",
"[",
"\"PATH\"",
"]",
"=",
"paths",
"[",
"\"scripts\"",
"]",
"+",
"os",
".",
"pathsep",
"+",
"os",
".",
"defpath",
"if",
"\"prefix\"",
"not",
"in",
"paths",
":",
"paths",
"[",
"\"prefix\"",
"]",
"=",
"prefix",
"purelib",
"=",
"make_posix",
"(",
"get_python_lib",
"(",
"plat_specific",
"=",
"0",
",",
"prefix",
"=",
"prefix",
")",
")",
"platlib",
"=",
"make_posix",
"(",
"get_python_lib",
"(",
"plat_specific",
"=",
"1",
",",
"prefix",
"=",
"prefix",
")",
")",
"if",
"purelib",
"==",
"platlib",
":",
"lib_dirs",
"=",
"purelib",
"else",
":",
"lib_dirs",
"=",
"purelib",
"+",
"os",
".",
"pathsep",
"+",
"platlib",
"paths",
"[",
"\"libdir\"",
"]",
"=",
"purelib",
"paths",
"[",
"\"purelib\"",
"]",
"=",
"purelib",
"paths",
"[",
"\"platlib\"",
"]",
"=",
"platlib",
"paths",
"[",
"'PYTHONPATH'",
"]",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"[",
"\"\"",
",",
"\".\"",
",",
"lib_dirs",
"]",
")",
"paths",
"[",
"\"libdirs\"",
"]",
"=",
"lib_dirs",
"return",
"paths"
] |
Returns the context appropriate paths for the environment.
:return: A dictionary of environment specific paths to be used for installation operations
:rtype: dict
.. note:: The implementation of this is borrowed from a combination of pip and
virtualenv and is likely to change at some point in the future.
>>> from pipenv.core import project
>>> from pipenv.environment import Environment
>>> env = Environment(prefix=project.virtualenv_location, is_venv=True, sources=project.sources)
>>> import pprint
>>> pprint.pprint(env.base_paths)
{'PATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin::/bin:/usr/bin',
'PYTHONPATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'data': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'include': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'libdir': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platinclude': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'platlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platstdlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7',
'prefix': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'purelib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'scripts': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin',
'stdlib': '/home/hawk/.pyenv/versions/3.7.1/lib/python3.7'}
|
[
"Returns",
"the",
"context",
"appropriate",
"paths",
"for",
"the",
"environment",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L124-L173
|
train
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.python
|
def python(self):
"""Path to the environment python"""
py = vistir.compat.Path(self.base_paths["scripts"]).joinpath("python").absolute().as_posix()
if not py:
return vistir.compat.Path(sys.executable).as_posix()
return py
|
python
|
def python(self):
"""Path to the environment python"""
py = vistir.compat.Path(self.base_paths["scripts"]).joinpath("python").absolute().as_posix()
if not py:
return vistir.compat.Path(sys.executable).as_posix()
return py
|
[
"def",
"python",
"(",
"self",
")",
":",
"py",
"=",
"vistir",
".",
"compat",
".",
"Path",
"(",
"self",
".",
"base_paths",
"[",
"\"scripts\"",
"]",
")",
".",
"joinpath",
"(",
"\"python\"",
")",
".",
"absolute",
"(",
")",
".",
"as_posix",
"(",
")",
"if",
"not",
"py",
":",
"return",
"vistir",
".",
"compat",
".",
"Path",
"(",
"sys",
".",
"executable",
")",
".",
"as_posix",
"(",
")",
"return",
"py"
] |
Path to the environment python
|
[
"Path",
"to",
"the",
"environment",
"python"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L182-L187
|
train
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.sys_path
|
def sys_path(self):
"""
The system path inside the environment
:return: The :data:`sys.path` from the environment
:rtype: list
"""
from .vendor.vistir.compat import JSONDecodeError
current_executable = vistir.compat.Path(sys.executable).as_posix()
if not self.python or self.python == current_executable:
return sys.path
elif any([sys.prefix == self.prefix, not self.is_venv]):
return sys.path
cmd_args = [self.python, "-c", "import json, sys; print(json.dumps(sys.path))"]
path, _ = vistir.misc.run(cmd_args, return_object=False, nospin=True, block=True, combine_stderr=False, write_to_stdout=False)
try:
path = json.loads(path.strip())
except JSONDecodeError:
path = sys.path
return path
|
python
|
def sys_path(self):
"""
The system path inside the environment
:return: The :data:`sys.path` from the environment
:rtype: list
"""
from .vendor.vistir.compat import JSONDecodeError
current_executable = vistir.compat.Path(sys.executable).as_posix()
if not self.python or self.python == current_executable:
return sys.path
elif any([sys.prefix == self.prefix, not self.is_venv]):
return sys.path
cmd_args = [self.python, "-c", "import json, sys; print(json.dumps(sys.path))"]
path, _ = vistir.misc.run(cmd_args, return_object=False, nospin=True, block=True, combine_stderr=False, write_to_stdout=False)
try:
path = json.loads(path.strip())
except JSONDecodeError:
path = sys.path
return path
|
[
"def",
"sys_path",
"(",
"self",
")",
":",
"from",
".",
"vendor",
".",
"vistir",
".",
"compat",
"import",
"JSONDecodeError",
"current_executable",
"=",
"vistir",
".",
"compat",
".",
"Path",
"(",
"sys",
".",
"executable",
")",
".",
"as_posix",
"(",
")",
"if",
"not",
"self",
".",
"python",
"or",
"self",
".",
"python",
"==",
"current_executable",
":",
"return",
"sys",
".",
"path",
"elif",
"any",
"(",
"[",
"sys",
".",
"prefix",
"==",
"self",
".",
"prefix",
",",
"not",
"self",
".",
"is_venv",
"]",
")",
":",
"return",
"sys",
".",
"path",
"cmd_args",
"=",
"[",
"self",
".",
"python",
",",
"\"-c\"",
",",
"\"import json, sys; print(json.dumps(sys.path))\"",
"]",
"path",
",",
"_",
"=",
"vistir",
".",
"misc",
".",
"run",
"(",
"cmd_args",
",",
"return_object",
"=",
"False",
",",
"nospin",
"=",
"True",
",",
"block",
"=",
"True",
",",
"combine_stderr",
"=",
"False",
",",
"write_to_stdout",
"=",
"False",
")",
"try",
":",
"path",
"=",
"json",
".",
"loads",
"(",
"path",
".",
"strip",
"(",
")",
")",
"except",
"JSONDecodeError",
":",
"path",
"=",
"sys",
".",
"path",
"return",
"path"
] |
The system path inside the environment
:return: The :data:`sys.path` from the environment
:rtype: list
|
[
"The",
"system",
"path",
"inside",
"the",
"environment"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L190-L210
|
train
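The same interpreter-interrogation trick, pointed at the current executable so the sketch runs anywhere: ask the target python to JSON-encode its own sys.path and parse the output.

import json
import subprocess
import sys

out = subprocess.check_output(
    [sys.executable, '-c', 'import json, sys; print(json.dumps(sys.path))']
)
path = json.loads(out.decode().strip())
print(len(path), 'entries, first:', path[:1])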
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.sys_prefix
|
def sys_prefix(self):
"""
The prefix run inside the context of the environment
:return: The python prefix inside the environment
:rtype: :data:`sys.prefix`
"""
        command = [self.python, "-c", "import sys; print(sys.prefix)"]
c = vistir.misc.run(command, return_object=True, block=True, nospin=True, write_to_stdout=False)
sys_prefix = vistir.compat.Path(vistir.misc.to_text(c.out).strip()).as_posix()
return sys_prefix
|
python
|
def sys_prefix(self):
"""
The prefix run inside the context of the environment
:return: The python prefix inside the environment
:rtype: :data:`sys.prefix`
"""
        command = [self.python, "-c", "import sys; print(sys.prefix)"]
c = vistir.misc.run(command, return_object=True, block=True, nospin=True, write_to_stdout=False)
sys_prefix = vistir.compat.Path(vistir.misc.to_text(c.out).strip()).as_posix()
return sys_prefix
|
[
"def",
"sys_prefix",
"(",
"self",
")",
":",
"command",
"=",
"[",
"self",
".",
"python",
",",
"\"-c\"",
"\"import sys; print(sys.prefix)\"",
"]",
"c",
"=",
"vistir",
".",
"misc",
".",
"run",
"(",
"command",
",",
"return_object",
"=",
"True",
",",
"block",
"=",
"True",
",",
"nospin",
"=",
"True",
",",
"write_to_stdout",
"=",
"False",
")",
"sys_prefix",
"=",
"vistir",
".",
"compat",
".",
"Path",
"(",
"vistir",
".",
"misc",
".",
"to_text",
"(",
"c",
".",
"out",
")",
".",
"strip",
"(",
")",
")",
".",
"as_posix",
"(",
")",
"return",
"sys_prefix"
] |
The prefix run inside the context of the environment
:return: The python prefix inside the environment
:rtype: :data:`sys.prefix`
|
[
"The",
"prefix",
"run",
"inside",
"the",
"context",
"of",
"the",
"environment"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L213-L224
|
train
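The same prefix query, done with the stdlib against the current interpreter so the sketch runs standalone.

import subprocess
import sys

# "-c" and the code travel as two argv entries; Python also happens to
# accept the fused "-c<code>" form, which is why a missing comma in the
# list literal above would go unnoticed at runtime.
out = subprocess.check_output(
    [sys.executable, '-c', 'import sys; print(sys.prefix)']
)
print(out.decode().strip())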
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.pip_version
|
def pip_version(self):
"""
Get the pip version in the environment. Useful for knowing which args we can use
when installing.
"""
from .vendor.packaging.version import parse as parse_version
pip = next(iter(
pkg for pkg in self.get_installed_packages() if pkg.key == "pip"
), None)
if pip is not None:
            return parse_version(pip.version)
return parse_version("18.0")
|
python
|
def pip_version(self):
"""
Get the pip version in the environment. Useful for knowing which args we can use
when installing.
"""
from .vendor.packaging.version import parse as parse_version
pip = next(iter(
pkg for pkg in self.get_installed_packages() if pkg.key == "pip"
), None)
if pip is not None:
            return parse_version(pip.version)
return parse_version("18.0")
|
[
"def",
"pip_version",
"(",
"self",
")",
":",
"from",
".",
"vendor",
".",
"packaging",
".",
"version",
"import",
"parse",
"as",
"parse_version",
"pip",
"=",
"next",
"(",
"iter",
"(",
"pkg",
"for",
"pkg",
"in",
"self",
".",
"get_installed_packages",
"(",
")",
"if",
"pkg",
".",
"key",
"==",
"\"pip\"",
")",
",",
"None",
")",
"if",
"pip",
"is",
"not",
"None",
":",
"pip_version",
"=",
"parse_version",
"(",
"pip",
".",
"version",
")",
"return",
"parse_version",
"(",
"\"18.0\"",
")"
] |
Get the pip version in the environment. Useful for knowing which args we can use
when installing.
|
[
"Get",
"the",
"pip",
"version",
"in",
"the",
"environment",
".",
"Useful",
"for",
"knowing",
"which",
"args",
"we",
"can",
"use",
"when",
"installing",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L251-L262
|
train
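Why parsed versions matter here, assuming the packaging library is available: parse() yields objects that compare numerically rather than lexically, so callers can gate version-dependent pip flags correctly.

from packaging.version import parse as parse_version

print(parse_version('19.1') > parse_version('18.0'))   # True
print(parse_version('9.0.1') > parse_version('18.0'))  # False (string comparison would say True)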
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.get_distributions
|
def get_distributions(self):
"""
        Retrieves the distributions installed on the library path of the environment
:return: A set of distributions found on the library path
:rtype: iterator
"""
pkg_resources = self.safe_import("pkg_resources")
libdirs = self.base_paths["libdirs"].split(os.pathsep)
dists = (pkg_resources.find_distributions(libdir) for libdir in libdirs)
for dist in itertools.chain.from_iterable(dists):
yield dist
|
python
|
def get_distributions(self):
"""
        Retrieves the distributions installed on the library path of the environment
:return: A set of distributions found on the library path
:rtype: iterator
"""
pkg_resources = self.safe_import("pkg_resources")
libdirs = self.base_paths["libdirs"].split(os.pathsep)
dists = (pkg_resources.find_distributions(libdir) for libdir in libdirs)
for dist in itertools.chain.from_iterable(dists):
yield dist
|
[
"def",
"get_distributions",
"(",
"self",
")",
":",
"pkg_resources",
"=",
"self",
".",
"safe_import",
"(",
"\"pkg_resources\"",
")",
"libdirs",
"=",
"self",
".",
"base_paths",
"[",
"\"libdirs\"",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"dists",
"=",
"(",
"pkg_resources",
".",
"find_distributions",
"(",
"libdir",
")",
"for",
"libdir",
"in",
"libdirs",
")",
"for",
"dist",
"in",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"dists",
")",
":",
"yield",
"dist"
] |
Retrieves the distributions installed on the library path of the environment
:return: A set of distributions found on the library path
:rtype: iterator
|
[
"Retrives",
"the",
"distributions",
"installed",
"on",
"the",
"library",
"path",
"of",
"the",
"environment"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L264-L276
|
train
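The same find-and-chain pattern, pointed at this interpreter's site-packages instead of an Environment's libdirs; note that site.getsitepackages() is missing in some older virtualenv layouts.

import itertools
import site

import pkg_resources

libdirs = site.getsitepackages()
dists = (pkg_resources.find_distributions(d) for d in libdirs)
for dist in itertools.islice(itertools.chain.from_iterable(dists), 5):
    print(dist.project_name, dist.version)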
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.find_egg
|
def find_egg(self, egg_dist):
"""Find an egg by name in the given environment"""
site_packages = self.libdir[1]
search_filename = "{0}.egg-link".format(egg_dist.project_name)
try:
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
search_locations = [site_packages, user_site]
for site_directory in search_locations:
egg = os.path.join(site_directory, search_filename)
if os.path.isfile(egg):
return egg
|
python
|
def find_egg(self, egg_dist):
"""Find an egg by name in the given environment"""
site_packages = self.libdir[1]
search_filename = "{0}.egg-link".format(egg_dist.project_name)
try:
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
search_locations = [site_packages, user_site]
for site_directory in search_locations:
egg = os.path.join(site_directory, search_filename)
if os.path.isfile(egg):
return egg
|
[
"def",
"find_egg",
"(",
"self",
",",
"egg_dist",
")",
":",
"site_packages",
"=",
"self",
".",
"libdir",
"[",
"1",
"]",
"search_filename",
"=",
"\"{0}.egg-link\"",
".",
"format",
"(",
"egg_dist",
".",
"project_name",
")",
"try",
":",
"user_site",
"=",
"site",
".",
"getusersitepackages",
"(",
")",
"except",
"AttributeError",
":",
"user_site",
"=",
"site",
".",
"USER_SITE",
"search_locations",
"=",
"[",
"site_packages",
",",
"user_site",
"]",
"for",
"site_directory",
"in",
"search_locations",
":",
"egg",
"=",
"os",
".",
"path",
".",
"join",
"(",
"site_directory",
",",
"search_filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"egg",
")",
":",
"return",
"egg"
] |
Find an egg by name in the given environment
|
[
"Find",
"an",
"egg",
"by",
"name",
"in",
"the",
"given",
"environment"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L278-L290
|
train
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.dist_is_in_project
|
def dist_is_in_project(self, dist):
"""Determine whether the supplied distribution is in the environment."""
from .project import _normalized
prefixes = [
_normalized(prefix) for prefix in self.base_paths["libdirs"].split(os.pathsep)
if _normalized(prefix).startswith(_normalized(self.prefix.as_posix()))
]
location = self.locate_dist(dist)
if not location:
return False
location = _normalized(make_posix(location))
return any(location.startswith(prefix) for prefix in prefixes)
|
python
|
def dist_is_in_project(self, dist):
"""Determine whether the supplied distribution is in the environment."""
from .project import _normalized
prefixes = [
_normalized(prefix) for prefix in self.base_paths["libdirs"].split(os.pathsep)
if _normalized(prefix).startswith(_normalized(self.prefix.as_posix()))
]
location = self.locate_dist(dist)
if not location:
return False
location = _normalized(make_posix(location))
return any(location.startswith(prefix) for prefix in prefixes)
|
[
"def",
"dist_is_in_project",
"(",
"self",
",",
"dist",
")",
":",
"from",
".",
"project",
"import",
"_normalized",
"prefixes",
"=",
"[",
"_normalized",
"(",
"prefix",
")",
"for",
"prefix",
"in",
"self",
".",
"base_paths",
"[",
"\"libdirs\"",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"if",
"_normalized",
"(",
"prefix",
")",
".",
"startswith",
"(",
"_normalized",
"(",
"self",
".",
"prefix",
".",
"as_posix",
"(",
")",
")",
")",
"]",
"location",
"=",
"self",
".",
"locate_dist",
"(",
"dist",
")",
"if",
"not",
"location",
":",
"return",
"False",
"location",
"=",
"_normalized",
"(",
"make_posix",
"(",
"location",
")",
")",
"return",
"any",
"(",
"location",
".",
"startswith",
"(",
"prefix",
")",
"for",
"prefix",
"in",
"prefixes",
")"
] |
Determine whether the supplied distribution is in the environment.
|
[
"Determine",
"whether",
"the",
"supplied",
"distribution",
"is",
"in",
"the",
"environment",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L300-L311
|
train
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.is_installed
|
def is_installed(self, pkgname):
"""Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool
"""
return any(d for d in self.get_distributions() if d.project_name == pkgname)
|
python
|
def is_installed(self, pkgname):
"""Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool
"""
return any(d for d in self.get_distributions() if d.project_name == pkgname)
|
[
"def",
"is_installed",
"(",
"self",
",",
"pkgname",
")",
":",
"return",
"any",
"(",
"d",
"for",
"d",
"in",
"self",
".",
"get_distributions",
"(",
")",
"if",
"d",
".",
"project_name",
"==",
"pkgname",
")"
] |
Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool
|
[
"Given",
"a",
"package",
"name",
"returns",
"whether",
"it",
"is",
"installed",
"in",
"the",
"environment"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L481-L489
|
train
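The same any()-over-distributions scan, run against the current interpreter's working set; project_name matching is exact, so casing and hyphen/underscore variants are not normalized here.

import pkg_resources

def is_installed(pkgname):
    return any(d for d in pkg_resources.working_set
               if d.project_name == pkgname)

print(is_installed('setuptools'), is_installed('definitely-not-a-package'))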
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.run_py
|
def run_py(self, cmd, cwd=os.curdir):
"""Run a python command in the enviornment context.
:param cmd: A command to run in the environment - runs with `python -c`
:type cmd: str or list
:param str cwd: The working directory in which to execute the command, defaults to :data:`os.curdir`
:return: A finished command object
:rtype: :class:`~subprocess.Popen`
"""
c = None
if isinstance(cmd, six.string_types):
script = vistir.cmdparse.Script.parse("{0} -c {1}".format(self.python, cmd))
else:
script = vistir.cmdparse.Script.parse([self.python, "-c"] + list(cmd))
with self.activated():
c = vistir.misc.run(script._parts, return_object=True, nospin=True, cwd=cwd, write_to_stdout=False)
return c
|
python
|
def run_py(self, cmd, cwd=os.curdir):
"""Run a python command in the enviornment context.
:param cmd: A command to run in the environment - runs with `python -c`
:type cmd: str or list
:param str cwd: The working directory in which to execute the command, defaults to :data:`os.curdir`
:return: A finished command object
:rtype: :class:`~subprocess.Popen`
"""
c = None
if isinstance(cmd, six.string_types):
script = vistir.cmdparse.Script.parse("{0} -c {1}".format(self.python, cmd))
else:
script = vistir.cmdparse.Script.parse([self.python, "-c"] + list(cmd))
with self.activated():
c = vistir.misc.run(script._parts, return_object=True, nospin=True, cwd=cwd, write_to_stdout=False)
return c
|
[
"def",
"run_py",
"(",
"self",
",",
"cmd",
",",
"cwd",
"=",
"os",
".",
"curdir",
")",
":",
"c",
"=",
"None",
"if",
"isinstance",
"(",
"cmd",
",",
"six",
".",
"string_types",
")",
":",
"script",
"=",
"vistir",
".",
"cmdparse",
".",
"Script",
".",
"parse",
"(",
"\"{0} -c {1}\"",
".",
"format",
"(",
"self",
".",
"python",
",",
"cmd",
")",
")",
"else",
":",
"script",
"=",
"vistir",
".",
"cmdparse",
".",
"Script",
".",
"parse",
"(",
"[",
"self",
".",
"python",
",",
"\"-c\"",
"]",
"+",
"list",
"(",
"cmd",
")",
")",
"with",
"self",
".",
"activated",
"(",
")",
":",
"c",
"=",
"vistir",
".",
"misc",
".",
"run",
"(",
"script",
".",
"_parts",
",",
"return_object",
"=",
"True",
",",
"nospin",
"=",
"True",
",",
"cwd",
"=",
"cwd",
",",
"write_to_stdout",
"=",
"False",
")",
"return",
"c"
] |
Run a python command in the environment context.
:param cmd: A command to run in the environment - runs with `python -c`
:type cmd: str or list
:param str cwd: The working directory in which to execute the command, defaults to :data:`os.curdir`
:return: A finished command object
:rtype: :class:`~subprocess.Popen`
|
[
"Run",
"a",
"python",
"command",
"in",
"the",
"enviornment",
"context",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L507-L524
|
train
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.run_activate_this
|
def run_activate_this(self):
"""Runs the environment's inline activation script"""
if self.is_venv:
activate_this = os.path.join(self.scripts_dir, "activate_this.py")
if not os.path.isfile(activate_this):
raise OSError("No such file: {0!s}".format(activate_this))
with open(activate_this, "r") as f:
code = compile(f.read(), activate_this, "exec")
exec(code, dict(__file__=activate_this))
|
python
|
def run_activate_this(self):
"""Runs the environment's inline activation script"""
if self.is_venv:
activate_this = os.path.join(self.scripts_dir, "activate_this.py")
if not os.path.isfile(activate_this):
raise OSError("No such file: {0!s}".format(activate_this))
with open(activate_this, "r") as f:
code = compile(f.read(), activate_this, "exec")
exec(code, dict(__file__=activate_this))
|
[
"def",
"run_activate_this",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_venv",
":",
"activate_this",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"scripts_dir",
",",
"\"activate_this.py\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"activate_this",
")",
":",
"raise",
"OSError",
"(",
"\"No such file: {0!s}\"",
".",
"format",
"(",
"activate_this",
")",
")",
"with",
"open",
"(",
"activate_this",
",",
"\"r\"",
")",
"as",
"f",
":",
"code",
"=",
"compile",
"(",
"f",
".",
"read",
"(",
")",
",",
"activate_this",
",",
"\"exec\"",
")",
"exec",
"(",
"code",
",",
"dict",
"(",
"__file__",
"=",
"activate_this",
")",
")"
] |
Runs the environment's inline activation script
|
[
"Runs",
"the",
"environment",
"s",
"inline",
"activation",
"script"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L526-L534
|
train
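The exec-the-activator pattern in isolation; /path/to/venv is a placeholder, not a path from this repo. Executing activate_this.py this way rewrites sys.path and os.environ in-process, which is why the code above compiles it with __file__ bound.

import os

activate_this = os.path.join('/path/to/venv', 'bin', 'activate_this.py')  # placeholder path
if os.path.isfile(activate_this):
    with open(activate_this) as f:
        code = compile(f.read(), activate_this, 'exec')
    exec(code, {'__file__': activate_this})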
|
pypa/pipenv
|
pipenv/environment.py
|
Environment.activated
|
def activated(self, include_extras=True, extra_dists=None):
"""Helper context manager to activate the environment.
This context manager will set the following variables for the duration
of its activation:
* sys.prefix
* sys.path
* os.environ["VIRTUAL_ENV"]
* os.environ["PATH"]
In addition, it will make any distributions passed into `extra_dists` available
on `sys.path` while inside the context manager, as well as making `passa` itself
available.
The environment's `prefix` as well as `scripts_dir` properties are both prepended
to `os.environ["PATH"]` to ensure that calls to `~Environment.run()` use the
environment's path preferentially.
"""
if not extra_dists:
extra_dists = []
original_path = sys.path
original_prefix = sys.prefix
parent_path = vistir.compat.Path(__file__).absolute().parent
vendor_dir = parent_path.joinpath("vendor").as_posix()
patched_dir = parent_path.joinpath("patched").as_posix()
parent_path = parent_path.as_posix()
self.add_dist("pip")
prefix = self.prefix.as_posix()
with vistir.contextmanagers.temp_environ(), vistir.contextmanagers.temp_path():
os.environ["PATH"] = os.pathsep.join([
vistir.compat.fs_str(self.scripts_dir),
vistir.compat.fs_str(self.prefix.as_posix()),
os.environ.get("PATH", "")
])
os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8")
os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1")
from .environments import PIPENV_USE_SYSTEM
if self.is_venv:
os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
os.environ["VIRTUAL_ENV"] = vistir.compat.fs_str(prefix)
else:
if not PIPENV_USE_SYSTEM and not os.environ.get("VIRTUAL_ENV"):
os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
os.environ.pop("PYTHONHOME", None)
sys.path = self.sys_path
sys.prefix = self.sys_prefix
site.addsitedir(self.base_paths["purelib"])
pip = self.safe_import("pip")
pip_vendor = self.safe_import("pip._vendor")
pep517_dir = os.path.join(os.path.dirname(pip_vendor.__file__), "pep517")
site.addsitedir(pep517_dir)
os.environ["PYTHONPATH"] = os.pathsep.join([
os.environ.get("PYTHONPATH", self.base_paths["PYTHONPATH"]), pep517_dir
])
if include_extras:
site.addsitedir(parent_path)
sys.path.extend([parent_path, patched_dir, vendor_dir])
extra_dists = list(self.extra_dists) + extra_dists
for extra_dist in extra_dists:
if extra_dist not in self.get_working_set():
extra_dist.activate(self.sys_path)
try:
yield
finally:
sys.path = original_path
sys.prefix = original_prefix
six.moves.reload_module(pkg_resources)
|
python
|
def activated(self, include_extras=True, extra_dists=None):
"""Helper context manager to activate the environment.
This context manager will set the following variables for the duration
of its activation:
* sys.prefix
* sys.path
* os.environ["VIRTUAL_ENV"]
* os.environ["PATH"]
In addition, it will make any distributions passed into `extra_dists` available
on `sys.path` while inside the context manager, as well as making `passa` itself
available.
The environment's `prefix` as well as `scripts_dir` properties are both prepended
to `os.environ["PATH"]` to ensure that calls to `~Environment.run()` use the
environment's path preferentially.
"""
if not extra_dists:
extra_dists = []
original_path = sys.path
original_prefix = sys.prefix
parent_path = vistir.compat.Path(__file__).absolute().parent
vendor_dir = parent_path.joinpath("vendor").as_posix()
patched_dir = parent_path.joinpath("patched").as_posix()
parent_path = parent_path.as_posix()
self.add_dist("pip")
prefix = self.prefix.as_posix()
with vistir.contextmanagers.temp_environ(), vistir.contextmanagers.temp_path():
os.environ["PATH"] = os.pathsep.join([
vistir.compat.fs_str(self.scripts_dir),
vistir.compat.fs_str(self.prefix.as_posix()),
os.environ.get("PATH", "")
])
os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8")
os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1")
from .environments import PIPENV_USE_SYSTEM
if self.is_venv:
os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
os.environ["VIRTUAL_ENV"] = vistir.compat.fs_str(prefix)
else:
if not PIPENV_USE_SYSTEM and not os.environ.get("VIRTUAL_ENV"):
os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
os.environ.pop("PYTHONHOME", None)
sys.path = self.sys_path
sys.prefix = self.sys_prefix
site.addsitedir(self.base_paths["purelib"])
pip = self.safe_import("pip")
pip_vendor = self.safe_import("pip._vendor")
pep517_dir = os.path.join(os.path.dirname(pip_vendor.__file__), "pep517")
site.addsitedir(pep517_dir)
os.environ["PYTHONPATH"] = os.pathsep.join([
os.environ.get("PYTHONPATH", self.base_paths["PYTHONPATH"]), pep517_dir
])
if include_extras:
site.addsitedir(parent_path)
sys.path.extend([parent_path, patched_dir, vendor_dir])
extra_dists = list(self.extra_dists) + extra_dists
for extra_dist in extra_dists:
if extra_dist not in self.get_working_set():
extra_dist.activate(self.sys_path)
try:
yield
finally:
sys.path = original_path
sys.prefix = original_prefix
six.moves.reload_module(pkg_resources)
|
[
"def",
"activated",
"(",
"self",
",",
"include_extras",
"=",
"True",
",",
"extra_dists",
"=",
"None",
")",
":",
"if",
"not",
"extra_dists",
":",
"extra_dists",
"=",
"[",
"]",
"original_path",
"=",
"sys",
".",
"path",
"original_prefix",
"=",
"sys",
".",
"prefix",
"parent_path",
"=",
"vistir",
".",
"compat",
".",
"Path",
"(",
"__file__",
")",
".",
"absolute",
"(",
")",
".",
"parent",
"vendor_dir",
"=",
"parent_path",
".",
"joinpath",
"(",
"\"vendor\"",
")",
".",
"as_posix",
"(",
")",
"patched_dir",
"=",
"parent_path",
".",
"joinpath",
"(",
"\"patched\"",
")",
".",
"as_posix",
"(",
")",
"parent_path",
"=",
"parent_path",
".",
"as_posix",
"(",
")",
"self",
".",
"add_dist",
"(",
"\"pip\"",
")",
"prefix",
"=",
"self",
".",
"prefix",
".",
"as_posix",
"(",
")",
"with",
"vistir",
".",
"contextmanagers",
".",
"temp_environ",
"(",
")",
",",
"vistir",
".",
"contextmanagers",
".",
"temp_path",
"(",
")",
":",
"os",
".",
"environ",
"[",
"\"PATH\"",
"]",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"[",
"vistir",
".",
"compat",
".",
"fs_str",
"(",
"self",
".",
"scripts_dir",
")",
",",
"vistir",
".",
"compat",
".",
"fs_str",
"(",
"self",
".",
"prefix",
".",
"as_posix",
"(",
")",
")",
",",
"os",
".",
"environ",
".",
"get",
"(",
"\"PATH\"",
",",
"\"\"",
")",
"]",
")",
"os",
".",
"environ",
"[",
"\"PYTHONIOENCODING\"",
"]",
"=",
"vistir",
".",
"compat",
".",
"fs_str",
"(",
"\"utf-8\"",
")",
"os",
".",
"environ",
"[",
"\"PYTHONDONTWRITEBYTECODE\"",
"]",
"=",
"vistir",
".",
"compat",
".",
"fs_str",
"(",
"\"1\"",
")",
"from",
".",
"environments",
"import",
"PIPENV_USE_SYSTEM",
"if",
"self",
".",
"is_venv",
":",
"os",
".",
"environ",
"[",
"\"PYTHONPATH\"",
"]",
"=",
"self",
".",
"base_paths",
"[",
"\"PYTHONPATH\"",
"]",
"os",
".",
"environ",
"[",
"\"VIRTUAL_ENV\"",
"]",
"=",
"vistir",
".",
"compat",
".",
"fs_str",
"(",
"prefix",
")",
"else",
":",
"if",
"not",
"PIPENV_USE_SYSTEM",
"and",
"not",
"os",
".",
"environ",
".",
"get",
"(",
"\"VIRTUAL_ENV\"",
")",
":",
"os",
".",
"environ",
"[",
"\"PYTHONPATH\"",
"]",
"=",
"self",
".",
"base_paths",
"[",
"\"PYTHONPATH\"",
"]",
"os",
".",
"environ",
".",
"pop",
"(",
"\"PYTHONHOME\"",
",",
"None",
")",
"sys",
".",
"path",
"=",
"self",
".",
"sys_path",
"sys",
".",
"prefix",
"=",
"self",
".",
"sys_prefix",
"site",
".",
"addsitedir",
"(",
"self",
".",
"base_paths",
"[",
"\"purelib\"",
"]",
")",
"pip",
"=",
"self",
".",
"safe_import",
"(",
"\"pip\"",
")",
"pip_vendor",
"=",
"self",
".",
"safe_import",
"(",
"\"pip._vendor\"",
")",
"pep517_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"pip_vendor",
".",
"__file__",
")",
",",
"\"pep517\"",
")",
"site",
".",
"addsitedir",
"(",
"pep517_dir",
")",
"os",
".",
"environ",
"[",
"\"PYTHONPATH\"",
"]",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"[",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYTHONPATH\"",
",",
"self",
".",
"base_paths",
"[",
"\"PYTHONPATH\"",
"]",
")",
",",
"pep517_dir",
"]",
")",
"if",
"include_extras",
":",
"site",
".",
"addsitedir",
"(",
"parent_path",
")",
"sys",
".",
"path",
".",
"extend",
"(",
"[",
"parent_path",
",",
"patched_dir",
",",
"vendor_dir",
"]",
")",
"extra_dists",
"=",
"list",
"(",
"self",
".",
"extra_dists",
")",
"+",
"extra_dists",
"for",
"extra_dist",
"in",
"extra_dists",
":",
"if",
"extra_dist",
"not",
"in",
"self",
".",
"get_working_set",
"(",
")",
":",
"extra_dist",
".",
"activate",
"(",
"self",
".",
"sys_path",
")",
"try",
":",
"yield",
"finally",
":",
"sys",
".",
"path",
"=",
"original_path",
"sys",
".",
"prefix",
"=",
"original_prefix",
"six",
".",
"moves",
".",
"reload_module",
"(",
"pkg_resources",
")"
] |
Helper context manager to activate the environment.
This context manager will set the following variables for the duration
of its activation:
* sys.prefix
* sys.path
* os.environ["VIRTUAL_ENV"]
* os.environ["PATH"]
In addition, it will make any distributions passed into `extra_dists` available
on `sys.path` while inside the context manager, as well as making `passa` itself
available.
The environment's `prefix` as well as `scripts_dir` properties are both prepended
to `os.environ["PATH"]` to ensure that calls to `~Environment.run()` use the
environment's path preferentially.
|
[
"Helper",
"context",
"manager",
"to",
"activate",
"the",
"environment",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L537-L605
|
train
|
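A minimal sketch of the save-and-restore pattern `activated` is built around, using only the stdlib; the real context manager additionally patches os.environ, calls site.addsitedir, and reloads pkg_resources on exit.

import contextlib
import sys

@contextlib.contextmanager
def swapped_sys_state(new_path, new_prefix):
    # Save the interpreter state, swap it, and guarantee restoration.
    original_path, original_prefix = sys.path, sys.prefix
    sys.path, sys.prefix = new_path, new_prefix
    try:
        yield
    finally:
        sys.path, sys.prefix = original_path, original_prefix

with swapped_sys_state(list(sys.path), sys.prefix):
    pass  # imports resolved here see the swapped interpreter state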
pypa/pipenv
|
pipenv/environment.py
|
Environment.uninstall
|
def uninstall(self, pkgname, *args, **kwargs):
"""A context manager which allows uninstallation of packages from the environment
:param str pkgname: The name of a package to uninstall
>>> env = Environment("/path/to/env/root")
>>> with env.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller:
cleaned = uninstaller.paths
>>> if cleaned:
print("uninstalled packages: %s" % cleaned)
"""
auto_confirm = kwargs.pop("auto_confirm", True)
verbose = kwargs.pop("verbose", False)
with self.activated():
monkey_patch = next(iter(
dist for dist in self.base_working_set
if dist.project_name == "recursive-monkey-patch"
), None)
if monkey_patch:
monkey_patch.activate()
pip_shims = self.safe_import("pip_shims")
pathset_base = pip_shims.UninstallPathSet
pathset_base._permitted = PatchedUninstaller._permitted
dist = next(
iter(filter(lambda d: d.project_name == pkgname, self.get_working_set())),
None
)
pathset = pathset_base.from_dist(dist)
if pathset is not None:
pathset.remove(auto_confirm=auto_confirm, verbose=verbose)
try:
yield pathset
except Exception as e:
if pathset is not None:
pathset.rollback()
else:
if pathset is not None:
pathset.commit()
if pathset is None:
return
|
python
|
def uninstall(self, pkgname, *args, **kwargs):
"""A context manager which allows uninstallation of packages from the environment
:param str pkgname: The name of a package to uninstall
>>> env = Environment("/path/to/env/root")
>>> with env.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller:
cleaned = uninstaller.paths
>>> if cleaned:
print("uninstalled packages: %s" % cleaned)
"""
auto_confirm = kwargs.pop("auto_confirm", True)
verbose = kwargs.pop("verbose", False)
with self.activated():
monkey_patch = next(iter(
dist for dist in self.base_working_set
if dist.project_name == "recursive-monkey-patch"
), None)
if monkey_patch:
monkey_patch.activate()
pip_shims = self.safe_import("pip_shims")
pathset_base = pip_shims.UninstallPathSet
pathset_base._permitted = PatchedUninstaller._permitted
dist = next(
iter(filter(lambda d: d.project_name == pkgname, self.get_working_set())),
None
)
pathset = pathset_base.from_dist(dist)
if pathset is not None:
pathset.remove(auto_confirm=auto_confirm, verbose=verbose)
try:
yield pathset
except Exception as e:
if pathset is not None:
pathset.rollback()
else:
if pathset is not None:
pathset.commit()
if pathset is None:
return
|
[
"def",
"uninstall",
"(",
"self",
",",
"pkgname",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"auto_confirm",
"=",
"kwargs",
".",
"pop",
"(",
"\"auto_confirm\"",
",",
"True",
")",
"verbose",
"=",
"kwargs",
".",
"pop",
"(",
"\"verbose\"",
",",
"False",
")",
"with",
"self",
".",
"activated",
"(",
")",
":",
"monkey_patch",
"=",
"next",
"(",
"iter",
"(",
"dist",
"for",
"dist",
"in",
"self",
".",
"base_working_set",
"if",
"dist",
".",
"project_name",
"==",
"\"recursive-monkey-patch\"",
")",
",",
"None",
")",
"if",
"monkey_patch",
":",
"monkey_patch",
".",
"activate",
"(",
")",
"pip_shims",
"=",
"self",
".",
"safe_import",
"(",
"\"pip_shims\"",
")",
"pathset_base",
"=",
"pip_shims",
".",
"UninstallPathSet",
"pathset_base",
".",
"_permitted",
"=",
"PatchedUninstaller",
".",
"_permitted",
"dist",
"=",
"next",
"(",
"iter",
"(",
"filter",
"(",
"lambda",
"d",
":",
"d",
".",
"project_name",
"==",
"pkgname",
",",
"self",
".",
"get_working_set",
"(",
")",
")",
")",
",",
"None",
")",
"pathset",
"=",
"pathset_base",
".",
"from_dist",
"(",
"dist",
")",
"if",
"pathset",
"is",
"not",
"None",
":",
"pathset",
".",
"remove",
"(",
"auto_confirm",
"=",
"auto_confirm",
",",
"verbose",
"=",
"verbose",
")",
"try",
":",
"yield",
"pathset",
"except",
"Exception",
"as",
"e",
":",
"if",
"pathset",
"is",
"not",
"None",
":",
"pathset",
".",
"rollback",
"(",
")",
"else",
":",
"if",
"pathset",
"is",
"not",
"None",
":",
"pathset",
".",
"commit",
"(",
")",
"if",
"pathset",
"is",
"None",
":",
"return"
] |
A context manager which allows uninstallation of packages from the environment
:param str pkgname: The name of a package to uninstall
>>> env = Environment("/path/to/env/root")
>>> with env.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller:
cleaned = uninstaller.paths
>>> if cleaned:
print("uninstalled packages: %s" % cleaned)
|
[
"A",
"context",
"manager",
"which",
"allows",
"uninstallation",
"of",
"packages",
"from",
"the",
"environment"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/environment.py#L673-L713
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
stn
|
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
|
python
|
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
|
[
"def",
"stn",
"(",
"s",
",",
"length",
",",
"encoding",
",",
"errors",
")",
":",
"s",
"=",
"s",
".",
"encode",
"(",
"encoding",
",",
"errors",
")",
"return",
"s",
"[",
":",
"length",
"]",
"+",
"(",
"length",
"-",
"len",
"(",
"s",
")",
")",
"*",
"NUL"
] |
Convert a string to a null-terminated bytes object.
|
[
"Convert",
"a",
"string",
"to",
"a",
"null",
"-",
"terminated",
"bytes",
"object",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L185-L189
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
nts
|
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
|
python
|
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
|
[
"def",
"nts",
"(",
"s",
",",
"encoding",
",",
"errors",
")",
":",
"p",
"=",
"s",
".",
"find",
"(",
"b\"\\0\"",
")",
"if",
"p",
"!=",
"-",
"1",
":",
"s",
"=",
"s",
"[",
":",
"p",
"]",
"return",
"s",
".",
"decode",
"(",
"encoding",
",",
"errors",
")"
] |
Convert a null-terminated bytes object to a string.
|
[
"Convert",
"a",
"null",
"-",
"terminated",
"bytes",
"object",
"to",
"a",
"string",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L191-L197
|
train
|
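A quick round trip through the stn/nts pair above (assuming both are in scope alongside the module's NUL constant): padding out with null bytes on the way in, truncating at the first null on the way out.

field = stn("hello", 8, "utf-8", "strict")
assert field == b"hello\x00\x00\x00"
assert nts(field, "utf-8", "strict") == "hello"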
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
nti
|
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
|
python
|
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
|
[
"def",
"nti",
"(",
"s",
")",
":",
"# There are two possible encodings for a number field, see",
"# itn() below.",
"if",
"s",
"[",
"0",
"]",
"!=",
"chr",
"(",
"0o200",
")",
":",
"try",
":",
"n",
"=",
"int",
"(",
"nts",
"(",
"s",
",",
"\"ascii\"",
",",
"\"strict\"",
")",
"or",
"\"0\"",
",",
"8",
")",
"except",
"ValueError",
":",
"raise",
"InvalidHeaderError",
"(",
"\"invalid header\"",
")",
"else",
":",
"n",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"s",
")",
"-",
"1",
")",
":",
"n",
"<<=",
"8",
"n",
"+=",
"ord",
"(",
"s",
"[",
"i",
"+",
"1",
"]",
")",
"return",
"n"
] |
Convert a number field to a python number.
|
[
"Convert",
"a",
"number",
"field",
"to",
"a",
"python",
"number",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L199-L214
|
train
|
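The two header-number encodings nti accepts, decoded by hand in a self-contained sketch: plain octal text terminated by a NUL, and the GNU base-256 form flagged by a leading 0o200 byte.

octal_field = b"0000755\0"
assert int(octal_field.rstrip(b"\0"), 8) == 0o755

base256_field = bytes([0o200]) + (2097152).to_bytes(7, "big")
n = 0
for byte in base256_field[1:]:
    n = (n << 8) + byte          # big-endian, most significant byte first
assert n == 2097152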
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
itn
|
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
|
python
|
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
|
[
"def",
"itn",
"(",
"n",
",",
"digits",
"=",
"8",
",",
"format",
"=",
"DEFAULT_FORMAT",
")",
":",
"# POSIX 1003.1-1988 requires numbers to be encoded as a string of",
"# octal digits followed by a null-byte, this allows values up to",
"# (8**(digits-1))-1. GNU tar allows storing numbers greater than",
"# that if necessary. A leading 0o200 byte indicates this particular",
"# encoding, the following digits-1 bytes are a big-endian",
"# representation. This allows values up to (256**(digits-1))-1.",
"if",
"0",
"<=",
"n",
"<",
"8",
"**",
"(",
"digits",
"-",
"1",
")",
":",
"s",
"=",
"(",
"\"%0*o\"",
"%",
"(",
"digits",
"-",
"1",
",",
"n",
")",
")",
".",
"encode",
"(",
"\"ascii\"",
")",
"+",
"NUL",
"else",
":",
"if",
"format",
"!=",
"GNU_FORMAT",
"or",
"n",
">=",
"256",
"**",
"(",
"digits",
"-",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"overflow in number field\"",
")",
"if",
"n",
"<",
"0",
":",
"# XXX We mimic GNU tar's behaviour with negative numbers,",
"# this could raise OverflowError.",
"n",
"=",
"struct",
".",
"unpack",
"(",
"\"L\"",
",",
"struct",
".",
"pack",
"(",
"\"l\"",
",",
"n",
")",
")",
"[",
"0",
"]",
"s",
"=",
"bytearray",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"digits",
"-",
"1",
")",
":",
"s",
".",
"insert",
"(",
"0",
",",
"n",
"&",
"0o377",
")",
"n",
">>=",
"8",
"s",
".",
"insert",
"(",
"0",
",",
"0o200",
")",
"return",
"s"
] |
Convert a python number to a number field.
|
[
"Convert",
"a",
"python",
"number",
"to",
"a",
"number",
"field",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L216-L241
|
train
|
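Producing both encodings itn chooses between, by hand (self-contained; digits=8 matches the default field width): seven octal digits plus a NUL for small values, and the 0o200-prefixed big-endian form once the value overflows the octal field.

digits = 8
n_small = 0o755
octal_field = ("%0*o" % (digits - 1, n_small)).encode("ascii") + b"\0"
assert octal_field == b"0000755\0"

n_big = 8 ** (digits - 1)        # one past the octal field's maximum
gnu_field = bytes([0o200]) + n_big.to_bytes(digits - 1, "big")
assert len(gnu_field) == digits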
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
calc_chksums
|
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
|
python
|
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
|
[
"def",
"calc_chksums",
"(",
"buf",
")",
":",
"unsigned_chksum",
"=",
"256",
"+",
"sum",
"(",
"struct",
".",
"unpack",
"(",
"\"148B\"",
",",
"buf",
"[",
":",
"148",
"]",
")",
"+",
"struct",
".",
"unpack",
"(",
"\"356B\"",
",",
"buf",
"[",
"156",
":",
"512",
"]",
")",
")",
"signed_chksum",
"=",
"256",
"+",
"sum",
"(",
"struct",
".",
"unpack",
"(",
"\"148b\"",
",",
"buf",
"[",
":",
"148",
"]",
")",
"+",
"struct",
".",
"unpack",
"(",
"\"356b\"",
",",
"buf",
"[",
"156",
":",
"512",
"]",
")",
")",
"return",
"unsigned_chksum",
",",
"signed_chksum"
] |
Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
|
[
"Calculate",
"the",
"checksum",
"for",
"a",
"member",
"s",
"header",
"by",
"summing",
"up",
"all",
"characters",
"except",
"for",
"the",
"chksum",
"field",
"which",
"is",
"treated",
"as",
"if",
"it",
"was",
"filled",
"with",
"spaces",
".",
"According",
"to",
"the",
"GNU",
"tar",
"sources",
"some",
"tars",
"(",
"Sun",
"and",
"NeXT",
")",
"calculate",
"chksum",
"with",
"signed",
"char",
"which",
"will",
"be",
"different",
"if",
"there",
"are",
"chars",
"in",
"the",
"buffer",
"with",
"the",
"high",
"bit",
"set",
".",
"So",
"we",
"calculate",
"two",
"checksums",
"unsigned",
"and",
"signed",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L243-L254
|
train
|
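Exercising the checksum arithmetic above on a fabricated 512-byte block (self-contained): bytes 148-156 are the chksum field itself and are treated as eight spaces, which is where the constant 256 (8 * 0x20) comes from.

import struct

buf = bytes(range(256)) * 2          # any 512-byte stand-in for a header
unsigned = 256 + sum(struct.unpack("148B", buf[:148]) +
                     struct.unpack("356B", buf[156:512]))
signed = 256 + sum(struct.unpack("148b", buf[:148]) +
                   struct.unpack("356b", buf[156:512]))
print(unsigned, signed)              # they differ: bytes >= 0x80 are present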
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
copyfileobj
|
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
|
python
|
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
|
[
"def",
"copyfileobj",
"(",
"src",
",",
"dst",
",",
"length",
"=",
"None",
")",
":",
"if",
"length",
"==",
"0",
":",
"return",
"if",
"length",
"is",
"None",
":",
"while",
"True",
":",
"buf",
"=",
"src",
".",
"read",
"(",
"16",
"*",
"1024",
")",
"if",
"not",
"buf",
":",
"break",
"dst",
".",
"write",
"(",
"buf",
")",
"return",
"BUFSIZE",
"=",
"16",
"*",
"1024",
"blocks",
",",
"remainder",
"=",
"divmod",
"(",
"length",
",",
"BUFSIZE",
")",
"for",
"b",
"in",
"range",
"(",
"blocks",
")",
":",
"buf",
"=",
"src",
".",
"read",
"(",
"BUFSIZE",
")",
"if",
"len",
"(",
"buf",
")",
"<",
"BUFSIZE",
":",
"raise",
"IOError",
"(",
"\"end of file reached\"",
")",
"dst",
".",
"write",
"(",
"buf",
")",
"if",
"remainder",
"!=",
"0",
":",
"buf",
"=",
"src",
".",
"read",
"(",
"remainder",
")",
"if",
"len",
"(",
"buf",
")",
"<",
"remainder",
":",
"raise",
"IOError",
"(",
"\"end of file reached\"",
")",
"dst",
".",
"write",
"(",
"buf",
")",
"return"
] |
Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
|
[
"Copy",
"length",
"bytes",
"from",
"fileobj",
"src",
"to",
"fileobj",
"dst",
".",
"If",
"length",
"is",
"None",
"copy",
"the",
"entire",
"content",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L256-L283
|
train
|
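Both modes of copyfileobj above, driven with in-memory streams (this assumes the function from the record is in scope): a bounded copy that must find exactly `length` bytes, and an unbounded copy that drains to EOF.

import io

src = io.BytesIO(b"abc" * 10000)
dst = io.BytesIO()
copyfileobj(src, dst, 1024)          # exactly 1024 bytes, or IOError
src.seek(0)
dst = io.BytesIO()
copyfileobj(src, dst)                # length=None: copy to EOF
assert dst.getvalue() == b"abc" * 10000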
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
filemode
|
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
|
python
|
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
|
[
"def",
"filemode",
"(",
"mode",
")",
":",
"perm",
"=",
"[",
"]",
"for",
"table",
"in",
"filemode_table",
":",
"for",
"bit",
",",
"char",
"in",
"table",
":",
"if",
"mode",
"&",
"bit",
"==",
"bit",
":",
"perm",
".",
"append",
"(",
"char",
")",
"break",
"else",
":",
"perm",
".",
"append",
"(",
"\"-\"",
")",
"return",
"\"\"",
".",
"join",
"(",
"perm",
")"
] |
Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
|
[
"Convert",
"a",
"file",
"s",
"mode",
"to",
"a",
"string",
"of",
"the",
"form",
"-",
"rwxrwxrwx",
".",
"Used",
"by",
"TarFile",
".",
"list",
"()"
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L312-L325
|
train
|
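Sample outputs from filemode above (assuming it and the module's filemode_table are in scope); the 0o40000 bit is S_IFDIR, hence the leading 'd'.

print(filemode(0o100755))    # -rwxr-xr-x  (regular file, mode 755)
print(filemode(0o40755))     # drwxr-xr-x  (directory, mode 755)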
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
is_tarfile
|
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
|
python
|
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
|
[
"def",
"is_tarfile",
"(",
"name",
")",
":",
"try",
":",
"t",
"=",
"open",
"(",
"name",
")",
"t",
".",
"close",
"(",
")",
"return",
"True",
"except",
"TarError",
":",
"return",
"False"
] |
Return True if name points to a tar archive that we
are able to handle, else return False.
|
[
"Return",
"True",
"if",
"name",
"points",
"to",
"a",
"tar",
"archive",
"that",
"we",
"are",
"able",
"to",
"handle",
"else",
"return",
"False",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L2595-L2604
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream._init_write_gz
|
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
|
python
|
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
|
[
"def",
"_init_write_gz",
"(",
"self",
")",
":",
"self",
".",
"cmp",
"=",
"self",
".",
"zlib",
".",
"compressobj",
"(",
"9",
",",
"self",
".",
"zlib",
".",
"DEFLATED",
",",
"-",
"self",
".",
"zlib",
".",
"MAX_WBITS",
",",
"self",
".",
"zlib",
".",
"DEF_MEM_LEVEL",
",",
"0",
")",
"timestamp",
"=",
"struct",
".",
"pack",
"(",
"\"<L\"",
",",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"self",
".",
"__write",
"(",
"b\"\\037\\213\\010\\010\"",
"+",
"timestamp",
"+",
"b\"\\002\\377\"",
")",
"if",
"self",
".",
"name",
".",
"endswith",
"(",
"\".gz\"",
")",
":",
"self",
".",
"name",
"=",
"self",
".",
"name",
"[",
":",
"-",
"3",
"]",
"# RFC1952 says we must use ISO-8859-1 for the FNAME field.",
"self",
".",
"__write",
"(",
"self",
".",
"name",
".",
"encode",
"(",
"\"iso-8859-1\"",
",",
"\"replace\"",
")",
"+",
"NUL",
")"
] |
Initialize for writing with gzip compression.
|
[
"Initialize",
"for",
"writing",
"with",
"gzip",
"compression",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L455-L467
|
train
|
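The ten-byte gzip member header _init_write_gz emits, decomposed field by field in a self-contained sketch (see RFC 1952): the magic pair is followed by the deflate method byte and an FNAME flag announcing the NUL-terminated name that trails the fixed header.

import struct
import time

magic = b"\037\213"                  # gzip magic bytes
method = b"\010"                     # CM = 8, deflate
flags = b"\010"                      # FLG = 0x08, FNAME is present
mtime = struct.pack("<L", int(time.time()))
xfl_os = b"\002\377"                 # XFL hint + OS byte (unknown)
header = magic + method + flags + mtime + xfl_os
header += "archive.tar".encode("iso-8859-1", "replace") + b"\0"
assert header[:4] == b"\037\213\010\010"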
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream.write
|
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
|
python
|
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
|
[
"def",
"write",
"(",
"self",
",",
"s",
")",
":",
"if",
"self",
".",
"comptype",
"==",
"\"gz\"",
":",
"self",
".",
"crc",
"=",
"self",
".",
"zlib",
".",
"crc32",
"(",
"s",
",",
"self",
".",
"crc",
")",
"self",
".",
"pos",
"+=",
"len",
"(",
"s",
")",
"if",
"self",
".",
"comptype",
"!=",
"\"tar\"",
":",
"s",
"=",
"self",
".",
"cmp",
".",
"compress",
"(",
"s",
")",
"self",
".",
"__write",
"(",
"s",
")"
] |
Write string s to the stream.
|
[
"Write",
"string",
"s",
"to",
"the",
"stream",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L469-L477
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream.__write
|
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
|
python
|
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
|
[
"def",
"__write",
"(",
"self",
",",
"s",
")",
":",
"self",
".",
"buf",
"+=",
"s",
"while",
"len",
"(",
"self",
".",
"buf",
")",
">",
"self",
".",
"bufsize",
":",
"self",
".",
"fileobj",
".",
"write",
"(",
"self",
".",
"buf",
"[",
":",
"self",
".",
"bufsize",
"]",
")",
"self",
".",
"buf",
"=",
"self",
".",
"buf",
"[",
"self",
".",
"bufsize",
":",
"]"
] |
Write string s to the stream if a whole new block
is ready to be written.
|
[
"Write",
"string",
"s",
"to",
"the",
"stream",
"if",
"a",
"whole",
"new",
"block",
"is",
"ready",
"to",
"be",
"written",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L479-L486
|
train
|
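The block-buffering invariant __write maintains, restated as a standalone class: writes accumulate in a byte buffer and the backing file only ever receives whole bufsize-sized blocks (a sketch; the real stream also tracks compression state and position).

class BlockWriter(object):
    def __init__(self, fileobj, bufsize=16 * 512):
        self.fileobj = fileobj
        self.bufsize = bufsize
        self.buf = b""

    def write(self, s):
        self.buf += s
        # Flush only complete blocks; the remainder stays buffered.
        while len(self.buf) > self.bufsize:
            self.fileobj.write(self.buf[:self.bufsize])
            self.buf = self.buf[self.bufsize:]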
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream.close
|
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
|
python
|
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"closed",
":",
"return",
"if",
"self",
".",
"mode",
"==",
"\"w\"",
"and",
"self",
".",
"comptype",
"!=",
"\"tar\"",
":",
"self",
".",
"buf",
"+=",
"self",
".",
"cmp",
".",
"flush",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"\"w\"",
"and",
"self",
".",
"buf",
":",
"self",
".",
"fileobj",
".",
"write",
"(",
"self",
".",
"buf",
")",
"self",
".",
"buf",
"=",
"b\"\"",
"if",
"self",
".",
"comptype",
"==",
"\"gz\"",
":",
"# The native zlib crc is an unsigned 32-bit integer, but",
"# the Python wrapper implicitly casts that to a signed C",
"# long. So, on a 32-bit box self.crc may \"look negative\",",
"# while the same crc on a 64-bit box may \"look positive\".",
"# To avoid irksome warnings from the `struct` module, force",
"# it to look positive on all boxes.",
"self",
".",
"fileobj",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"\"<L\"",
",",
"self",
".",
"crc",
"&",
"0xffffffff",
")",
")",
"self",
".",
"fileobj",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"\"<L\"",
",",
"self",
".",
"pos",
"&",
"0xffffFFFF",
")",
")",
"if",
"not",
"self",
".",
"_extfileobj",
":",
"self",
".",
"fileobj",
".",
"close",
"(",
")",
"self",
".",
"closed",
"=",
"True"
] |
Close the _Stream object. No operation should be
done on it afterwards.
|
[
"Close",
"the",
"_Stream",
"object",
".",
"No",
"operation",
"should",
"be",
"done",
"on",
"it",
"afterwards",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L488-L514
|
train
|
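The masking trick called out in close()'s comment, in isolation (self-contained): zlib.crc32 could return a negative signed value on older interpreters, and the & 0xffffffff forces the unsigned 32-bit representation that struct.pack("<L") requires.

import struct
import zlib

crc = zlib.crc32(b"payload")
trailer = struct.pack("<L", crc & 0xffffffff)   # 4-byte little-endian CRC32
assert len(trailer) == 4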
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream._init_read_gz
|
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
|
python
|
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
|
[
"def",
"_init_read_gz",
"(",
"self",
")",
":",
"self",
".",
"cmp",
"=",
"self",
".",
"zlib",
".",
"decompressobj",
"(",
"-",
"self",
".",
"zlib",
".",
"MAX_WBITS",
")",
"self",
".",
"dbuf",
"=",
"b\"\"",
"# taken from gzip.GzipFile with some alterations",
"if",
"self",
".",
"__read",
"(",
"2",
")",
"!=",
"b\"\\037\\213\"",
":",
"raise",
"ReadError",
"(",
"\"not a gzip file\"",
")",
"if",
"self",
".",
"__read",
"(",
"1",
")",
"!=",
"b\"\\010\"",
":",
"raise",
"CompressionError",
"(",
"\"unsupported compression method\"",
")",
"flag",
"=",
"ord",
"(",
"self",
".",
"__read",
"(",
"1",
")",
")",
"self",
".",
"__read",
"(",
"6",
")",
"if",
"flag",
"&",
"4",
":",
"xlen",
"=",
"ord",
"(",
"self",
".",
"__read",
"(",
"1",
")",
")",
"+",
"256",
"*",
"ord",
"(",
"self",
".",
"__read",
"(",
"1",
")",
")",
"self",
".",
"read",
"(",
"xlen",
")",
"if",
"flag",
"&",
"8",
":",
"while",
"True",
":",
"s",
"=",
"self",
".",
"__read",
"(",
"1",
")",
"if",
"not",
"s",
"or",
"s",
"==",
"NUL",
":",
"break",
"if",
"flag",
"&",
"16",
":",
"while",
"True",
":",
"s",
"=",
"self",
".",
"__read",
"(",
"1",
")",
"if",
"not",
"s",
"or",
"s",
"==",
"NUL",
":",
"break",
"if",
"flag",
"&",
"2",
":",
"self",
".",
"__read",
"(",
"2",
")"
] |
Initialize for reading a gzip compressed fileobj.
|
[
"Initialize",
"for",
"reading",
"a",
"gzip",
"compressed",
"fileobj",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L516-L545
|
train
|
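The FLG bits _init_read_gz dispatches on, named per RFC 1952 (self-contained); the optional fields follow the fixed header in the order EXTRA, NAME, COMMENT, then the header CRC, which matches the order the method consumes them.

FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16

flag = FNAME | FEXTRA
assert flag & FEXTRA          # skip the xlen-byte extra field first
assert flag & FNAME           # then the NUL-terminated original file name
assert not flag & FCOMMENT    # no comment field in this header
assert not flag & FHCRC       # no 2-byte header CRC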
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream.seek
|
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
|
python
|
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
|
[
"def",
"seek",
"(",
"self",
",",
"pos",
"=",
"0",
")",
":",
"if",
"pos",
"-",
"self",
".",
"pos",
">=",
"0",
":",
"blocks",
",",
"remainder",
"=",
"divmod",
"(",
"pos",
"-",
"self",
".",
"pos",
",",
"self",
".",
"bufsize",
")",
"for",
"i",
"in",
"range",
"(",
"blocks",
")",
":",
"self",
".",
"read",
"(",
"self",
".",
"bufsize",
")",
"self",
".",
"read",
"(",
"remainder",
")",
"else",
":",
"raise",
"StreamError",
"(",
"\"seeking backwards is not allowed\"",
")",
"return",
"self",
".",
"pos"
] |
Set the stream's file pointer to pos. Negative seeking
is forbidden.
|
[
"Set",
"the",
"stream",
"s",
"file",
"pointer",
"to",
"pos",
".",
"Negative",
"seeking",
"is",
"forbidden",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L552-L563
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream.read
|
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
|
python
|
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
|
[
"def",
"read",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"t",
"=",
"[",
"]",
"while",
"True",
":",
"buf",
"=",
"self",
".",
"_read",
"(",
"self",
".",
"bufsize",
")",
"if",
"not",
"buf",
":",
"break",
"t",
".",
"append",
"(",
"buf",
")",
"buf",
"=",
"\"\"",
".",
"join",
"(",
"t",
")",
"else",
":",
"buf",
"=",
"self",
".",
"_read",
"(",
"size",
")",
"self",
".",
"pos",
"+=",
"len",
"(",
"buf",
")",
"return",
"buf"
] |
Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
|
[
"Return",
"the",
"next",
"size",
"number",
"of",
"bytes",
"from",
"the",
"stream",
".",
"If",
"size",
"is",
"not",
"defined",
"return",
"all",
"bytes",
"of",
"the",
"stream",
"up",
"to",
"EOF",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L565-L581
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream._read
|
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
|
python
|
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
|
[
"def",
"_read",
"(",
"self",
",",
"size",
")",
":",
"if",
"self",
".",
"comptype",
"==",
"\"tar\"",
":",
"return",
"self",
".",
"__read",
"(",
"size",
")",
"c",
"=",
"len",
"(",
"self",
".",
"dbuf",
")",
"while",
"c",
"<",
"size",
":",
"buf",
"=",
"self",
".",
"__read",
"(",
"self",
".",
"bufsize",
")",
"if",
"not",
"buf",
":",
"break",
"try",
":",
"buf",
"=",
"self",
".",
"cmp",
".",
"decompress",
"(",
"buf",
")",
"except",
"IOError",
":",
"raise",
"ReadError",
"(",
"\"invalid compressed data\"",
")",
"self",
".",
"dbuf",
"+=",
"buf",
"c",
"+=",
"len",
"(",
"buf",
")",
"buf",
"=",
"self",
".",
"dbuf",
"[",
":",
"size",
"]",
"self",
".",
"dbuf",
"=",
"self",
".",
"dbuf",
"[",
"size",
":",
"]",
"return",
"buf"
] |
Return size bytes from the stream.
|
[
"Return",
"size",
"bytes",
"from",
"the",
"stream",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L583-L602
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_Stream.__read
|
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
|
python
|
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
|
[
"def",
"__read",
"(",
"self",
",",
"size",
")",
":",
"c",
"=",
"len",
"(",
"self",
".",
"buf",
")",
"while",
"c",
"<",
"size",
":",
"buf",
"=",
"self",
".",
"fileobj",
".",
"read",
"(",
"self",
".",
"bufsize",
")",
"if",
"not",
"buf",
":",
"break",
"self",
".",
"buf",
"+=",
"buf",
"c",
"+=",
"len",
"(",
"buf",
")",
"buf",
"=",
"self",
".",
"buf",
"[",
":",
"size",
"]",
"self",
".",
"buf",
"=",
"self",
".",
"buf",
"[",
"size",
":",
"]",
"return",
"buf"
] |
Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
|
[
"Return",
"size",
"bytes",
"from",
"stream",
".",
"If",
"internal",
"buffer",
"is",
"empty",
"read",
"another",
"block",
"from",
"the",
"stream",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L604-L617
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
_FileInFile.read
|
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
|
python
|
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
|
[
"def",
"read",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"self",
".",
"size",
"-",
"self",
".",
"position",
"else",
":",
"size",
"=",
"min",
"(",
"size",
",",
"self",
".",
"size",
"-",
"self",
".",
"position",
")",
"buf",
"=",
"b\"\"",
"while",
"size",
">",
"0",
":",
"while",
"True",
":",
"data",
",",
"start",
",",
"stop",
",",
"offset",
"=",
"self",
".",
"map",
"[",
"self",
".",
"map_index",
"]",
"if",
"start",
"<=",
"self",
".",
"position",
"<",
"stop",
":",
"break",
"else",
":",
"self",
".",
"map_index",
"+=",
"1",
"if",
"self",
".",
"map_index",
"==",
"len",
"(",
"self",
".",
"map",
")",
":",
"self",
".",
"map_index",
"=",
"0",
"length",
"=",
"min",
"(",
"size",
",",
"stop",
"-",
"self",
".",
"position",
")",
"if",
"data",
":",
"self",
".",
"fileobj",
".",
"seek",
"(",
"offset",
"+",
"(",
"self",
".",
"position",
"-",
"start",
")",
")",
"buf",
"+=",
"self",
".",
"fileobj",
".",
"read",
"(",
"length",
")",
"else",
":",
"buf",
"+=",
"NUL",
"*",
"length",
"size",
"-=",
"length",
"self",
".",
"position",
"+=",
"length",
"return",
"buf"
] |
Read data from the file.
|
[
"Read",
"data",
"from",
"the",
"file",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L752-L778
|
train
|
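A toy sparse map in the layout _FileInFile.read walks (self-contained): each entry is (data, start, stop, offset), where data=False marks a hole that reads back as NUL bytes rather than archive content.

import io

NUL = b"\0"
archive = io.BytesIO(b"data")
sparse_map = [(True, 0, 4, 0),     # logical bytes 0-3, stored at offset 0
              (False, 4, 8, 0)]    # logical bytes 4-7 are a hole

out = b""
for data, start, stop, offset in sparse_map:
    if data:
        archive.seek(offset)
        out += archive.read(stop - start)
    else:
        out += NUL * (stop - start)
assert out == b"data" + NUL * 4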
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
ExFileObject.read
|
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
|
python
|
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
|
[
"def",
"read",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"if",
"self",
".",
"closed",
":",
"raise",
"ValueError",
"(",
"\"I/O operation on closed file\"",
")",
"buf",
"=",
"b\"\"",
"if",
"self",
".",
"buffer",
":",
"if",
"size",
"is",
"None",
":",
"buf",
"=",
"self",
".",
"buffer",
"self",
".",
"buffer",
"=",
"b\"\"",
"else",
":",
"buf",
"=",
"self",
".",
"buffer",
"[",
":",
"size",
"]",
"self",
".",
"buffer",
"=",
"self",
".",
"buffer",
"[",
"size",
":",
"]",
"if",
"size",
"is",
"None",
":",
"buf",
"+=",
"self",
".",
"fileobj",
".",
"read",
"(",
")",
"else",
":",
"buf",
"+=",
"self",
".",
"fileobj",
".",
"read",
"(",
"size",
"-",
"len",
"(",
"buf",
")",
")",
"self",
".",
"position",
"+=",
"len",
"(",
"buf",
")",
"return",
"buf"
] |
Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
|
[
"Read",
"at",
"most",
"size",
"bytes",
"from",
"the",
"file",
".",
"If",
"size",
"is",
"not",
"present",
"or",
"None",
"read",
"all",
"data",
"until",
"EOF",
"is",
"reached",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L810-L832
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
ExFileObject.readline
|
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
|
python
|
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
|
[
"def",
"readline",
"(",
"self",
",",
"size",
"=",
"-",
"1",
")",
":",
"if",
"self",
".",
"closed",
":",
"raise",
"ValueError",
"(",
"\"I/O operation on closed file\"",
")",
"pos",
"=",
"self",
".",
"buffer",
".",
"find",
"(",
"b\"\\n\"",
")",
"+",
"1",
"if",
"pos",
"==",
"0",
":",
"# no newline found.",
"while",
"True",
":",
"buf",
"=",
"self",
".",
"fileobj",
".",
"read",
"(",
"self",
".",
"blocksize",
")",
"self",
".",
"buffer",
"+=",
"buf",
"if",
"not",
"buf",
"or",
"b\"\\n\"",
"in",
"buf",
":",
"pos",
"=",
"self",
".",
"buffer",
".",
"find",
"(",
"b\"\\n\"",
")",
"+",
"1",
"if",
"pos",
"==",
"0",
":",
"# no newline found.",
"pos",
"=",
"len",
"(",
"self",
".",
"buffer",
")",
"break",
"if",
"size",
"!=",
"-",
"1",
":",
"pos",
"=",
"min",
"(",
"size",
",",
"pos",
")",
"buf",
"=",
"self",
".",
"buffer",
"[",
":",
"pos",
"]",
"self",
".",
"buffer",
"=",
"self",
".",
"buffer",
"[",
"pos",
":",
"]",
"self",
".",
"position",
"+=",
"len",
"(",
"buf",
")",
"return",
"buf"
] |
Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
|
[
"Read",
"one",
"entire",
"line",
"from",
"the",
"file",
".",
"If",
"size",
"is",
"present",
"and",
"non",
"-",
"negative",
"return",
"a",
"string",
"with",
"at",
"most",
"that",
"size",
"which",
"may",
"be",
"an",
"incomplete",
"line",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L837-L864
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
ExFileObject.seek
|
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
|
python
|
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
|
[
"def",
"seek",
"(",
"self",
",",
"pos",
",",
"whence",
"=",
"os",
".",
"SEEK_SET",
")",
":",
"if",
"self",
".",
"closed",
":",
"raise",
"ValueError",
"(",
"\"I/O operation on closed file\"",
")",
"if",
"whence",
"==",
"os",
".",
"SEEK_SET",
":",
"self",
".",
"position",
"=",
"min",
"(",
"max",
"(",
"pos",
",",
"0",
")",
",",
"self",
".",
"size",
")",
"elif",
"whence",
"==",
"os",
".",
"SEEK_CUR",
":",
"if",
"pos",
"<",
"0",
":",
"self",
".",
"position",
"=",
"max",
"(",
"self",
".",
"position",
"+",
"pos",
",",
"0",
")",
"else",
":",
"self",
".",
"position",
"=",
"min",
"(",
"self",
".",
"position",
"+",
"pos",
",",
"self",
".",
"size",
")",
"elif",
"whence",
"==",
"os",
".",
"SEEK_END",
":",
"self",
".",
"position",
"=",
"max",
"(",
"min",
"(",
"self",
".",
"size",
"+",
"pos",
",",
"self",
".",
"size",
")",
",",
"0",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid argument\"",
")",
"self",
".",
"buffer",
"=",
"b\"\"",
"self",
".",
"fileobj",
".",
"seek",
"(",
"self",
".",
"position",
")"
] |
Seek to a position in the file.
|
[
"Seek",
"to",
"a",
"position",
"in",
"the",
"file",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L884-L903
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo.get_info
|
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
|
python
|
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
|
[
"def",
"get_info",
"(",
"self",
")",
":",
"info",
"=",
"{",
"\"name\"",
":",
"self",
".",
"name",
",",
"\"mode\"",
":",
"self",
".",
"mode",
"&",
"0o7777",
",",
"\"uid\"",
":",
"self",
".",
"uid",
",",
"\"gid\"",
":",
"self",
".",
"gid",
",",
"\"size\"",
":",
"self",
".",
"size",
",",
"\"mtime\"",
":",
"self",
".",
"mtime",
",",
"\"chksum\"",
":",
"self",
".",
"chksum",
",",
"\"type\"",
":",
"self",
".",
"type",
",",
"\"linkname\"",
":",
"self",
".",
"linkname",
",",
"\"uname\"",
":",
"self",
".",
"uname",
",",
"\"gname\"",
":",
"self",
".",
"gname",
",",
"\"devmajor\"",
":",
"self",
".",
"devmajor",
",",
"\"devminor\"",
":",
"self",
".",
"devminor",
"}",
"if",
"info",
"[",
"\"type\"",
"]",
"==",
"DIRTYPE",
"and",
"not",
"info",
"[",
"\"name\"",
"]",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"info",
"[",
"\"name\"",
"]",
"+=",
"\"/\"",
"return",
"info"
] |
Return the TarInfo's attributes as a dictionary.
|
[
"Return",
"the",
"TarInfo",
"s",
"attributes",
"as",
"a",
"dictionary",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L978-L1000
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo.tobuf
|
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
|
python
|
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
|
[
"def",
"tobuf",
"(",
"self",
",",
"format",
"=",
"DEFAULT_FORMAT",
",",
"encoding",
"=",
"ENCODING",
",",
"errors",
"=",
"\"surrogateescape\"",
")",
":",
"info",
"=",
"self",
".",
"get_info",
"(",
")",
"if",
"format",
"==",
"USTAR_FORMAT",
":",
"return",
"self",
".",
"create_ustar_header",
"(",
"info",
",",
"encoding",
",",
"errors",
")",
"elif",
"format",
"==",
"GNU_FORMAT",
":",
"return",
"self",
".",
"create_gnu_header",
"(",
"info",
",",
"encoding",
",",
"errors",
")",
"elif",
"format",
"==",
"PAX_FORMAT",
":",
"return",
"self",
".",
"create_pax_header",
"(",
"info",
",",
"encoding",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid format\"",
")"
] |
Return a tar header as a string of 512 byte blocks.
|
[
"Return",
"a",
"tar",
"header",
"as",
"a",
"string",
"of",
"512",
"byte",
"blocks",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1002-L1014
|
train
|
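A hedged usage sketch for tobuf: the standard-library tarfile module exposes the same TarInfo.tobuf API as this vendored backport, so it stands in here; the 512-byte granularity follows from _create_header and _create_payload in this module.

import tarfile  # the stdlib module mirrors this backport's TarInfo API

t = tarfile.TarInfo("example.txt")
t.size = 11
buf = t.tobuf(tarfile.GNU_FORMAT)
assert len(buf) % 512 == 0  # headers are emitted in whole 512-byte blocks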
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo.create_ustar_header
|
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
|
python
|
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
|
[
"def",
"create_ustar_header",
"(",
"self",
",",
"info",
",",
"encoding",
",",
"errors",
")",
":",
"info",
"[",
"\"magic\"",
"]",
"=",
"POSIX_MAGIC",
"if",
"len",
"(",
"info",
"[",
"\"linkname\"",
"]",
")",
">",
"LENGTH_LINK",
":",
"raise",
"ValueError",
"(",
"\"linkname is too long\"",
")",
"if",
"len",
"(",
"info",
"[",
"\"name\"",
"]",
")",
">",
"LENGTH_NAME",
":",
"info",
"[",
"\"prefix\"",
"]",
",",
"info",
"[",
"\"name\"",
"]",
"=",
"self",
".",
"_posix_split_name",
"(",
"info",
"[",
"\"name\"",
"]",
")",
"return",
"self",
".",
"_create_header",
"(",
"info",
",",
"USTAR_FORMAT",
",",
"encoding",
",",
"errors",
")"
] |
Return the object as a ustar header block.
|
[
"Return",
"the",
"object",
"as",
"a",
"ustar",
"header",
"block",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1016-L1027
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo.create_gnu_header
|
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
|
python
|
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
|
[
"def",
"create_gnu_header",
"(",
"self",
",",
"info",
",",
"encoding",
",",
"errors",
")",
":",
"info",
"[",
"\"magic\"",
"]",
"=",
"GNU_MAGIC",
"buf",
"=",
"b\"\"",
"if",
"len",
"(",
"info",
"[",
"\"linkname\"",
"]",
")",
">",
"LENGTH_LINK",
":",
"buf",
"+=",
"self",
".",
"_create_gnu_long_header",
"(",
"info",
"[",
"\"linkname\"",
"]",
",",
"GNUTYPE_LONGLINK",
",",
"encoding",
",",
"errors",
")",
"if",
"len",
"(",
"info",
"[",
"\"name\"",
"]",
")",
">",
"LENGTH_NAME",
":",
"buf",
"+=",
"self",
".",
"_create_gnu_long_header",
"(",
"info",
"[",
"\"name\"",
"]",
",",
"GNUTYPE_LONGNAME",
",",
"encoding",
",",
"errors",
")",
"return",
"buf",
"+",
"self",
".",
"_create_header",
"(",
"info",
",",
"GNU_FORMAT",
",",
"encoding",
",",
"errors",
")"
] |
Return the object as a GNU header block sequence.
|
[
"Return",
"the",
"object",
"as",
"a",
"GNU",
"header",
"block",
"sequence",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1029-L1041
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo.create_pax_header
|
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplemental information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that are stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
|
python
|
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplemental information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that are stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
|
[
"def",
"create_pax_header",
"(",
"self",
",",
"info",
",",
"encoding",
")",
":",
"info",
"[",
"\"magic\"",
"]",
"=",
"POSIX_MAGIC",
"pax_headers",
"=",
"self",
".",
"pax_headers",
".",
"copy",
"(",
")",
"# Test string fields for values that exceed the field length or cannot",
"# be represented in ASCII encoding.",
"for",
"name",
",",
"hname",
",",
"length",
"in",
"(",
"(",
"\"name\"",
",",
"\"path\"",
",",
"LENGTH_NAME",
")",
",",
"(",
"\"linkname\"",
",",
"\"linkpath\"",
",",
"LENGTH_LINK",
")",
",",
"(",
"\"uname\"",
",",
"\"uname\"",
",",
"32",
")",
",",
"(",
"\"gname\"",
",",
"\"gname\"",
",",
"32",
")",
")",
":",
"if",
"hname",
"in",
"pax_headers",
":",
"# The pax header has priority.",
"continue",
"# Try to encode the string as ASCII.",
"try",
":",
"info",
"[",
"name",
"]",
".",
"encode",
"(",
"\"ascii\"",
",",
"\"strict\"",
")",
"except",
"UnicodeEncodeError",
":",
"pax_headers",
"[",
"hname",
"]",
"=",
"info",
"[",
"name",
"]",
"continue",
"if",
"len",
"(",
"info",
"[",
"name",
"]",
")",
">",
"length",
":",
"pax_headers",
"[",
"hname",
"]",
"=",
"info",
"[",
"name",
"]",
"# Test number fields for values that exceed the field limit or values",
"# that like to be stored as float.",
"for",
"name",
",",
"digits",
"in",
"(",
"(",
"\"uid\"",
",",
"8",
")",
",",
"(",
"\"gid\"",
",",
"8",
")",
",",
"(",
"\"size\"",
",",
"12",
")",
",",
"(",
"\"mtime\"",
",",
"12",
")",
")",
":",
"if",
"name",
"in",
"pax_headers",
":",
"# The pax header has priority. Avoid overflow.",
"info",
"[",
"name",
"]",
"=",
"0",
"continue",
"val",
"=",
"info",
"[",
"name",
"]",
"if",
"not",
"0",
"<=",
"val",
"<",
"8",
"**",
"(",
"digits",
"-",
"1",
")",
"or",
"isinstance",
"(",
"val",
",",
"float",
")",
":",
"pax_headers",
"[",
"name",
"]",
"=",
"str",
"(",
"val",
")",
"info",
"[",
"name",
"]",
"=",
"0",
"# Create a pax extended header if necessary.",
"if",
"pax_headers",
":",
"buf",
"=",
"self",
".",
"_create_pax_generic_header",
"(",
"pax_headers",
",",
"XHDTYPE",
",",
"encoding",
")",
"else",
":",
"buf",
"=",
"b\"\"",
"return",
"buf",
"+",
"self",
".",
"_create_header",
"(",
"info",
",",
"USTAR_FORMAT",
",",
"\"ascii\"",
",",
"\"replace\"",
")"
] |
Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplemental information.
|
[
"Return",
"the",
"object",
"as",
"a",
"ustar",
"header",
"block",
".",
"If",
"it",
"cannot",
"be",
"represented",
"this",
"way",
"prepend",
"a",
"pax",
"extended",
"header",
"sequence",
"with",
"supplement",
"information",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1043-L1090
|
train
|
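A minimal sketch of when the pax fallback triggers, run against the stdlib tarfile module (whose create_pax_header logic matches this backport): a uid that overflows the 8-digit octal ustar field is moved into an extended header record and zeroed in the ustar block.

import tarfile

t = tarfile.TarInfo("example.txt")
t.uid = 8 ** 7  # one past the largest value a 7-octal-digit field can hold
buf = t.tobuf(tarfile.PAX_FORMAT)
# extended header block and record payload precede the ustar block
assert len(buf) > 512 and len(buf) % 512 == 0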
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._posix_split_name
|
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
|
python
|
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
|
[
"def",
"_posix_split_name",
"(",
"self",
",",
"name",
")",
":",
"prefix",
"=",
"name",
"[",
":",
"LENGTH_PREFIX",
"+",
"1",
"]",
"while",
"prefix",
"and",
"prefix",
"[",
"-",
"1",
"]",
"!=",
"\"/\"",
":",
"prefix",
"=",
"prefix",
"[",
":",
"-",
"1",
"]",
"name",
"=",
"name",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"prefix",
"=",
"prefix",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"prefix",
"or",
"len",
"(",
"name",
")",
">",
"LENGTH_NAME",
":",
"raise",
"ValueError",
"(",
"\"name is too long\"",
")",
"return",
"prefix",
",",
"name"
] |
Split a name longer than 100 chars into a prefix
and a name part.
|
[
"Split",
"a",
"name",
"longer",
"than",
"100",
"chars",
"into",
"a",
"prefix",
"and",
"a",
"name",
"part",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1098-L1111
|
train
|
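A worked example of the split. The helper below is a standalone copy of the loop above, with LENGTH_NAME and LENGTH_PREFIX assumed to be the ustar limits of 100 and 155 bytes used by this module.

LENGTH_NAME, LENGTH_PREFIX = 100, 155

def posix_split_name(name):
    prefix = name[:LENGTH_PREFIX + 1]
    while prefix and prefix[-1] != "/":
        prefix = prefix[:-1]       # back up to the last "/" in range
    name = name[len(prefix):]      # tail after the split point
    prefix = prefix[:-1]           # drop the trailing "/"
    if not prefix or len(name) > LENGTH_NAME:
        raise ValueError("name is too long")
    return prefix, name

prefix, name = posix_split_name("a/" * 60 + "leaf.txt")
assert len(name) <= 100 and not prefix.endswith("/")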
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._create_header
|
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
return buf
|
python
|
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
return buf
|
[
"def",
"_create_header",
"(",
"info",
",",
"format",
",",
"encoding",
",",
"errors",
")",
":",
"parts",
"=",
"[",
"stn",
"(",
"info",
".",
"get",
"(",
"\"name\"",
",",
"\"\"",
")",
",",
"100",
",",
"encoding",
",",
"errors",
")",
",",
"itn",
"(",
"info",
".",
"get",
"(",
"\"mode\"",
",",
"0",
")",
"&",
"0o7777",
",",
"8",
",",
"format",
")",
",",
"itn",
"(",
"info",
".",
"get",
"(",
"\"uid\"",
",",
"0",
")",
",",
"8",
",",
"format",
")",
",",
"itn",
"(",
"info",
".",
"get",
"(",
"\"gid\"",
",",
"0",
")",
",",
"8",
",",
"format",
")",
",",
"itn",
"(",
"info",
".",
"get",
"(",
"\"size\"",
",",
"0",
")",
",",
"12",
",",
"format",
")",
",",
"itn",
"(",
"info",
".",
"get",
"(",
"\"mtime\"",
",",
"0",
")",
",",
"12",
",",
"format",
")",
",",
"b\" \"",
",",
"# checksum field",
"info",
".",
"get",
"(",
"\"type\"",
",",
"REGTYPE",
")",
",",
"stn",
"(",
"info",
".",
"get",
"(",
"\"linkname\"",
",",
"\"\"",
")",
",",
"100",
",",
"encoding",
",",
"errors",
")",
",",
"info",
".",
"get",
"(",
"\"magic\"",
",",
"POSIX_MAGIC",
")",
",",
"stn",
"(",
"info",
".",
"get",
"(",
"\"uname\"",
",",
"\"\"",
")",
",",
"32",
",",
"encoding",
",",
"errors",
")",
",",
"stn",
"(",
"info",
".",
"get",
"(",
"\"gname\"",
",",
"\"\"",
")",
",",
"32",
",",
"encoding",
",",
"errors",
")",
",",
"itn",
"(",
"info",
".",
"get",
"(",
"\"devmajor\"",
",",
"0",
")",
",",
"8",
",",
"format",
")",
",",
"itn",
"(",
"info",
".",
"get",
"(",
"\"devminor\"",
",",
"0",
")",
",",
"8",
",",
"format",
")",
",",
"stn",
"(",
"info",
".",
"get",
"(",
"\"prefix\"",
",",
"\"\"",
")",
",",
"155",
",",
"encoding",
",",
"errors",
")",
"]",
"buf",
"=",
"struct",
".",
"pack",
"(",
"\"%ds\"",
"%",
"BLOCKSIZE",
",",
"b\"\"",
".",
"join",
"(",
"parts",
")",
")",
"chksum",
"=",
"calc_chksums",
"(",
"buf",
"[",
"-",
"BLOCKSIZE",
":",
"]",
")",
"[",
"0",
"]",
"buf",
"=",
"buf",
"[",
":",
"-",
"364",
"]",
"+",
"(",
"\"%06o\\0\"",
"%",
"chksum",
")",
".",
"encode",
"(",
"\"ascii\"",
")",
"+",
"buf",
"[",
"-",
"357",
":",
"]",
"return",
"buf"
] |
Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
|
[
"Return",
"a",
"header",
"block",
".",
"info",
"is",
"a",
"dictionary",
"with",
"file",
"information",
"format",
"must",
"be",
"one",
"of",
"the",
"*",
"_FORMAT",
"constants",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1114-L1139
|
train
|
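The negative slice constants above encode the checksum field's position; the arithmetic below makes them explicit (BLOCKSIZE is 512 in this module). The checksum is computed with eight spaces in place of the field, then written back as six octal digits and a NUL, leaving the field's eighth byte as a space.

BLOCKSIZE = 512
assert BLOCKSIZE - 364 == 148  # the checksum field starts at byte 148
assert BLOCKSIZE - 357 == 155  # digits + NUL occupy bytes 148 through 154
# so buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] rewrites
# exactly seven bytes and keeps the trailing space at offset 155.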
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._create_payload
|
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
|
python
|
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
|
[
"def",
"_create_payload",
"(",
"payload",
")",
":",
"blocks",
",",
"remainder",
"=",
"divmod",
"(",
"len",
"(",
"payload",
")",
",",
"BLOCKSIZE",
")",
"if",
"remainder",
">",
"0",
":",
"payload",
"+=",
"(",
"BLOCKSIZE",
"-",
"remainder",
")",
"*",
"NUL",
"return",
"payload"
] |
Return the string payload filled with zero bytes
up to the next 512 byte border.
|
[
"Return",
"the",
"string",
"payload",
"filled",
"with",
"zero",
"bytes",
"up",
"to",
"the",
"next",
"512",
"byte",
"border",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1142-L1149
|
train
|
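A worked run of the padding arithmetic, as a standalone sketch with BLOCKSIZE and NUL assumed from this module's constants (512 and b"\0"):

BLOCKSIZE, NUL = 512, b"\0"

def pad_to_block(payload):
    blocks, remainder = divmod(len(payload), BLOCKSIZE)
    if remainder > 0:
        payload += (BLOCKSIZE - remainder) * NUL  # fill the partial block
    return payload

assert len(pad_to_block(b"x" * 700)) == 1024  # 700 bytes round up to 2 blocks
assert len(pad_to_block(b"x" * 512)) == 512   # exact multiples are untouched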
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._create_gnu_long_header
|
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
|
python
|
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
|
[
"def",
"_create_gnu_long_header",
"(",
"cls",
",",
"name",
",",
"type",
",",
"encoding",
",",
"errors",
")",
":",
"name",
"=",
"name",
".",
"encode",
"(",
"encoding",
",",
"errors",
")",
"+",
"NUL",
"info",
"=",
"{",
"}",
"info",
"[",
"\"name\"",
"]",
"=",
"\"././@LongLink\"",
"info",
"[",
"\"type\"",
"]",
"=",
"type",
"info",
"[",
"\"size\"",
"]",
"=",
"len",
"(",
"name",
")",
"info",
"[",
"\"magic\"",
"]",
"=",
"GNU_MAGIC",
"# create extended header + name blocks.",
"return",
"cls",
".",
"_create_header",
"(",
"info",
",",
"USTAR_FORMAT",
",",
"encoding",
",",
"errors",
")",
"+",
"cls",
".",
"_create_payload",
"(",
"name",
")"
] |
Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
|
[
"Return",
"a",
"GNUTYPE_LONGNAME",
"or",
"GNUTYPE_LONGLINK",
"sequence",
"for",
"name",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1152-L1166
|
train
|
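A hedged sketch of the resulting block layout, again using the stdlib tarfile module as a stand-in: an over-long name in GNU format produces one "././@LongLink" header block, the NUL-padded name payload, and then the real member header.

import tarfile

t = tarfile.TarInfo("d/" * 60 + "file.txt")  # 128-byte name, limit is 100
buf = t.tobuf(tarfile.GNU_FORMAT)
assert len(buf) == 3 * 512  # long-name header + padded name + real header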
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._create_pax_generic_header
|
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
# Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
|
python
|
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
# Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
|
[
"def",
"_create_pax_generic_header",
"(",
"cls",
",",
"pax_headers",
",",
"type",
",",
"encoding",
")",
":",
"# Check if one of the fields contains surrogate characters and thereby",
"# forces hdrcharset=BINARY, see _proc_pax() for more information.",
"binary",
"=",
"False",
"for",
"keyword",
",",
"value",
"in",
"pax_headers",
".",
"items",
"(",
")",
":",
"try",
":",
"value",
".",
"encode",
"(",
"\"utf8\"",
",",
"\"strict\"",
")",
"except",
"UnicodeEncodeError",
":",
"binary",
"=",
"True",
"break",
"records",
"=",
"b\"\"",
"if",
"binary",
":",
"# Put the hdrcharset field at the beginning of the header.",
"records",
"+=",
"b\"21 hdrcharset=BINARY\\n\"",
"for",
"keyword",
",",
"value",
"in",
"pax_headers",
".",
"items",
"(",
")",
":",
"keyword",
"=",
"keyword",
".",
"encode",
"(",
"\"utf8\"",
")",
"if",
"binary",
":",
"# Try to restore the original byte representation of `value'.",
"# Needless to say, that the encoding must match the string.",
"value",
"=",
"value",
".",
"encode",
"(",
"encoding",
",",
"\"surrogateescape\"",
")",
"else",
":",
"value",
"=",
"value",
".",
"encode",
"(",
"\"utf8\"",
")",
"l",
"=",
"len",
"(",
"keyword",
")",
"+",
"len",
"(",
"value",
")",
"+",
"3",
"# ' ' + '=' + '\\n'",
"n",
"=",
"p",
"=",
"0",
"while",
"True",
":",
"n",
"=",
"l",
"+",
"len",
"(",
"str",
"(",
"p",
")",
")",
"if",
"n",
"==",
"p",
":",
"break",
"p",
"=",
"n",
"records",
"+=",
"bytes",
"(",
"str",
"(",
"p",
")",
",",
"\"ascii\"",
")",
"+",
"b\" \"",
"+",
"keyword",
"+",
"b\"=\"",
"+",
"value",
"+",
"b\"\\n\"",
"# We use a hardcoded \"././@PaxHeader\" name like star does",
"# instead of the one that POSIX recommends.",
"info",
"=",
"{",
"}",
"info",
"[",
"\"name\"",
"]",
"=",
"\"././@PaxHeader\"",
"info",
"[",
"\"type\"",
"]",
"=",
"type",
"info",
"[",
"\"size\"",
"]",
"=",
"len",
"(",
"records",
")",
"info",
"[",
"\"magic\"",
"]",
"=",
"POSIX_MAGIC",
"# Create pax header + record blocks.",
"return",
"cls",
".",
"_create_header",
"(",
"info",
",",
"USTAR_FORMAT",
",",
"\"ascii\"",
",",
"\"replace\"",
")",
"+",
"cls",
".",
"_create_payload",
"(",
"records",
")"
] |
Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
|
[
"Return",
"a",
"POSIX",
".",
"1",
"-",
"2008",
"extended",
"or",
"global",
"header",
"sequence",
"that",
"contains",
"a",
"list",
"of",
"keyword",
"value",
"pairs",
".",
"The",
"values",
"must",
"be",
"strings",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1169-L1217
|
train
|
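The while loop above searches for a fixed point because a pax record's length field must count its own digits. A standalone sketch of that computation (pax_record is a hypothetical name, not part of this module):

def pax_record(keyword, value):
    keyword = keyword.encode("utf-8")
    value = value.encode("utf-8")
    l = len(keyword) + len(value) + 3  # ' ' + '=' + '\n'
    n = p = 0
    while True:
        n = l + len(str(p))  # total length if the length field were str(p)
        if n == p:           # stable: the field's digits count themselves
            break
        p = n
    return str(p).encode("ascii") + b" " + keyword + b"=" + value + b"\n"

rec = pax_record("path", "example.txt")
assert rec == b"20 path=example.txt\n" and len(rec) == 20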
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo.frombuf
|
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
# Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
|
python
|
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
# Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
|
[
"def",
"frombuf",
"(",
"cls",
",",
"buf",
",",
"encoding",
",",
"errors",
")",
":",
"if",
"len",
"(",
"buf",
")",
"==",
"0",
":",
"raise",
"EmptyHeaderError",
"(",
"\"empty header\"",
")",
"if",
"len",
"(",
"buf",
")",
"!=",
"BLOCKSIZE",
":",
"raise",
"TruncatedHeaderError",
"(",
"\"truncated header\"",
")",
"if",
"buf",
".",
"count",
"(",
"NUL",
")",
"==",
"BLOCKSIZE",
":",
"raise",
"EOFHeaderError",
"(",
"\"end of file header\"",
")",
"chksum",
"=",
"nti",
"(",
"buf",
"[",
"148",
":",
"156",
"]",
")",
"if",
"chksum",
"not",
"in",
"calc_chksums",
"(",
"buf",
")",
":",
"raise",
"InvalidHeaderError",
"(",
"\"bad checksum\"",
")",
"obj",
"=",
"cls",
"(",
")",
"obj",
".",
"name",
"=",
"nts",
"(",
"buf",
"[",
"0",
":",
"100",
"]",
",",
"encoding",
",",
"errors",
")",
"obj",
".",
"mode",
"=",
"nti",
"(",
"buf",
"[",
"100",
":",
"108",
"]",
")",
"obj",
".",
"uid",
"=",
"nti",
"(",
"buf",
"[",
"108",
":",
"116",
"]",
")",
"obj",
".",
"gid",
"=",
"nti",
"(",
"buf",
"[",
"116",
":",
"124",
"]",
")",
"obj",
".",
"size",
"=",
"nti",
"(",
"buf",
"[",
"124",
":",
"136",
"]",
")",
"obj",
".",
"mtime",
"=",
"nti",
"(",
"buf",
"[",
"136",
":",
"148",
"]",
")",
"obj",
".",
"chksum",
"=",
"chksum",
"obj",
".",
"type",
"=",
"buf",
"[",
"156",
":",
"157",
"]",
"obj",
".",
"linkname",
"=",
"nts",
"(",
"buf",
"[",
"157",
":",
"257",
"]",
",",
"encoding",
",",
"errors",
")",
"obj",
".",
"uname",
"=",
"nts",
"(",
"buf",
"[",
"265",
":",
"297",
"]",
",",
"encoding",
",",
"errors",
")",
"obj",
".",
"gname",
"=",
"nts",
"(",
"buf",
"[",
"297",
":",
"329",
"]",
",",
"encoding",
",",
"errors",
")",
"obj",
".",
"devmajor",
"=",
"nti",
"(",
"buf",
"[",
"329",
":",
"337",
"]",
")",
"obj",
".",
"devminor",
"=",
"nti",
"(",
"buf",
"[",
"337",
":",
"345",
"]",
")",
"prefix",
"=",
"nts",
"(",
"buf",
"[",
"345",
":",
"500",
"]",
",",
"encoding",
",",
"errors",
")",
"# Old V7 tar format represents a directory as a regular",
"# file with a trailing slash.",
"if",
"obj",
".",
"type",
"==",
"AREGTYPE",
"and",
"obj",
".",
"name",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"obj",
".",
"type",
"=",
"DIRTYPE",
"# The old GNU sparse format occupies some of the unused",
"# space in the buffer for up to 4 sparse structures.",
"# Save the them for later processing in _proc_sparse().",
"if",
"obj",
".",
"type",
"==",
"GNUTYPE_SPARSE",
":",
"pos",
"=",
"386",
"structs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"try",
":",
"offset",
"=",
"nti",
"(",
"buf",
"[",
"pos",
":",
"pos",
"+",
"12",
"]",
")",
"numbytes",
"=",
"nti",
"(",
"buf",
"[",
"pos",
"+",
"12",
":",
"pos",
"+",
"24",
"]",
")",
"except",
"ValueError",
":",
"break",
"structs",
".",
"append",
"(",
"(",
"offset",
",",
"numbytes",
")",
")",
"pos",
"+=",
"24",
"isextended",
"=",
"bool",
"(",
"buf",
"[",
"482",
"]",
")",
"origsize",
"=",
"nti",
"(",
"buf",
"[",
"483",
":",
"495",
"]",
")",
"obj",
".",
"_sparse_structs",
"=",
"(",
"structs",
",",
"isextended",
",",
"origsize",
")",
"# Remove redundant slashes from directories.",
"if",
"obj",
".",
"isdir",
"(",
")",
":",
"obj",
".",
"name",
"=",
"obj",
".",
"name",
".",
"rstrip",
"(",
"\"/\"",
")",
"# Reconstruct a ustar longname.",
"if",
"prefix",
"and",
"obj",
".",
"type",
"not",
"in",
"GNU_TYPES",
":",
"obj",
".",
"name",
"=",
"prefix",
"+",
"\"/\"",
"+",
"obj",
".",
"name",
"return",
"obj"
] |
Construct a TarInfo object from a 512 byte bytes object.
|
[
"Construct",
"a",
"TarInfo",
"object",
"from",
"a",
"512",
"byte",
"bytes",
"object",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1220-L1280
|
train
|
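The slice offsets in frombuf follow the fixed ustar header layout; the table below restates them as data (a reference sketch, not part of the module). Bytes 257 through 264 hold the magic and version fields, which frombuf skips.

USTAR_LAYOUT = [
    ("name", 0, 100), ("mode", 100, 8), ("uid", 108, 8), ("gid", 116, 8),
    ("size", 124, 12), ("mtime", 136, 12), ("chksum", 148, 8),
    ("type", 156, 1), ("linkname", 157, 100),
    ("uname", 265, 32), ("gname", 297, 32),
    ("devmajor", 329, 8), ("devminor", 337, 8), ("prefix", 345, 155),
]

def split_fields(buf):
    assert len(buf) == 512
    return {name: buf[off:off + size] for name, off, size in USTAR_LAYOUT}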
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo.fromtarfile
|
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
|
python
|
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
|
[
"def",
"fromtarfile",
"(",
"cls",
",",
"tarfile",
")",
":",
"buf",
"=",
"tarfile",
".",
"fileobj",
".",
"read",
"(",
"BLOCKSIZE",
")",
"obj",
"=",
"cls",
".",
"frombuf",
"(",
"buf",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"obj",
".",
"offset",
"=",
"tarfile",
".",
"fileobj",
".",
"tell",
"(",
")",
"-",
"BLOCKSIZE",
"return",
"obj",
".",
"_proc_member",
"(",
"tarfile",
")"
] |
Return the next TarInfo object from TarFile object
tarfile.
|
[
"Return",
"the",
"next",
"TarInfo",
"object",
"from",
"TarFile",
"object",
"tarfile",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1283-L1290
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._proc_member
|
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
|
python
|
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
|
[
"def",
"_proc_member",
"(",
"self",
",",
"tarfile",
")",
":",
"if",
"self",
".",
"type",
"in",
"(",
"GNUTYPE_LONGNAME",
",",
"GNUTYPE_LONGLINK",
")",
":",
"return",
"self",
".",
"_proc_gnulong",
"(",
"tarfile",
")",
"elif",
"self",
".",
"type",
"==",
"GNUTYPE_SPARSE",
":",
"return",
"self",
".",
"_proc_sparse",
"(",
"tarfile",
")",
"elif",
"self",
".",
"type",
"in",
"(",
"XHDTYPE",
",",
"XGLTYPE",
",",
"SOLARIS_XHDTYPE",
")",
":",
"return",
"self",
".",
"_proc_pax",
"(",
"tarfile",
")",
"else",
":",
"return",
"self",
".",
"_proc_builtin",
"(",
"tarfile",
")"
] |
Choose the right processing method depending on
the type and call it.
|
[
"Choose",
"the",
"right",
"processing",
"method",
"depending",
"on",
"the",
"type",
"and",
"call",
"it",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1303-L1314
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._proc_builtin
|
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
|
python
|
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
|
[
"def",
"_proc_builtin",
"(",
"self",
",",
"tarfile",
")",
":",
"self",
".",
"offset_data",
"=",
"tarfile",
".",
"fileobj",
".",
"tell",
"(",
")",
"offset",
"=",
"self",
".",
"offset_data",
"if",
"self",
".",
"isreg",
"(",
")",
"or",
"self",
".",
"type",
"not",
"in",
"SUPPORTED_TYPES",
":",
"# Skip the following data blocks.",
"offset",
"+=",
"self",
".",
"_block",
"(",
"self",
".",
"size",
")",
"tarfile",
".",
"offset",
"=",
"offset",
"# Patch the TarInfo object with saved global",
"# header information.",
"self",
".",
"_apply_pax_info",
"(",
"tarfile",
".",
"pax_headers",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"return",
"self"
] |
Process a builtin type or an unknown type which
will be treated as a regular file.
|
[
"Process",
"a",
"builtin",
"type",
"or",
"an",
"unknown",
"type",
"which",
"will",
"be",
"treated",
"as",
"a",
"regular",
"file",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1316-L1331
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._proc_gnulong
|
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
|
python
|
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
|
[
"def",
"_proc_gnulong",
"(",
"self",
",",
"tarfile",
")",
":",
"buf",
"=",
"tarfile",
".",
"fileobj",
".",
"read",
"(",
"self",
".",
"_block",
"(",
"self",
".",
"size",
")",
")",
"# Fetch the next header and process it.",
"try",
":",
"next",
"=",
"self",
".",
"fromtarfile",
"(",
"tarfile",
")",
"except",
"HeaderError",
":",
"raise",
"SubsequentHeaderError",
"(",
"\"missing or bad subsequent header\"",
")",
"# Patch the TarInfo object from the next header with",
"# the longname information.",
"next",
".",
"offset",
"=",
"self",
".",
"offset",
"if",
"self",
".",
"type",
"==",
"GNUTYPE_LONGNAME",
":",
"next",
".",
"name",
"=",
"nts",
"(",
"buf",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"elif",
"self",
".",
"type",
"==",
"GNUTYPE_LONGLINK",
":",
"next",
".",
"linkname",
"=",
"nts",
"(",
"buf",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"return",
"next"
] |
Process the blocks that hold a GNU longname
or longlink member.
|
[
"Process",
"the",
"blocks",
"that",
"hold",
"a",
"GNU",
"longname",
"or",
"longlink",
"member",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1333-L1353
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._proc_sparse
|
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
|
python
|
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
|
[
"def",
"_proc_sparse",
"(",
"self",
",",
"tarfile",
")",
":",
"# We already collected some sparse structures in frombuf().",
"structs",
",",
"isextended",
",",
"origsize",
"=",
"self",
".",
"_sparse_structs",
"del",
"self",
".",
"_sparse_structs",
"# Collect sparse structures from extended header blocks.",
"while",
"isextended",
":",
"buf",
"=",
"tarfile",
".",
"fileobj",
".",
"read",
"(",
"BLOCKSIZE",
")",
"pos",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"21",
")",
":",
"try",
":",
"offset",
"=",
"nti",
"(",
"buf",
"[",
"pos",
":",
"pos",
"+",
"12",
"]",
")",
"numbytes",
"=",
"nti",
"(",
"buf",
"[",
"pos",
"+",
"12",
":",
"pos",
"+",
"24",
"]",
")",
"except",
"ValueError",
":",
"break",
"if",
"offset",
"and",
"numbytes",
":",
"structs",
".",
"append",
"(",
"(",
"offset",
",",
"numbytes",
")",
")",
"pos",
"+=",
"24",
"isextended",
"=",
"bool",
"(",
"buf",
"[",
"504",
"]",
")",
"self",
".",
"sparse",
"=",
"structs",
"self",
".",
"offset_data",
"=",
"tarfile",
".",
"fileobj",
".",
"tell",
"(",
")",
"tarfile",
".",
"offset",
"=",
"self",
".",
"offset_data",
"+",
"self",
".",
"_block",
"(",
"self",
".",
"size",
")",
"self",
".",
"size",
"=",
"origsize",
"return",
"self"
] |
Process a GNU sparse header plus extra headers.
|
[
"Process",
"a",
"GNU",
"sparse",
"header",
"plus",
"extra",
"headers",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1355-L1381
|
train
|
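Where the constants 21 and 504 above come from: each sparse entry is a pair of 12-byte numeric fields, so one 512-byte extension block holds 512 // 24 = 21 entries, and the isextended flag byte sits immediately after them at offset 21 * 24 = 504.

assert 512 // 24 == 21 and 21 * 24 == 504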
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._proc_pax
|
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
# these fields are UTF-8 encoded, but POSIX.1-2008 tar
# implementations are allowed to store them as raw binary strings if
# the translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
|
python
|
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
# these fields are UTF-8 encoded, but POSIX.1-2008 tar
# implementations are allowed to store them as raw binary strings if
# the translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
|
[
"def",
"_proc_pax",
"(",
"self",
",",
"tarfile",
")",
":",
"# Read the header information.",
"buf",
"=",
"tarfile",
".",
"fileobj",
".",
"read",
"(",
"self",
".",
"_block",
"(",
"self",
".",
"size",
")",
")",
"# A pax header stores supplemental information for either",
"# the following file (extended) or all following files",
"# (global).",
"if",
"self",
".",
"type",
"==",
"XGLTYPE",
":",
"pax_headers",
"=",
"tarfile",
".",
"pax_headers",
"else",
":",
"pax_headers",
"=",
"tarfile",
".",
"pax_headers",
".",
"copy",
"(",
")",
"# Check if the pax header contains a hdrcharset field. This tells us",
"# the encoding of the path, linkpath, uname and gname fields. Normally,",
"# these fields are UTF-8 encoded but since POSIX.1-2008 tar",
"# implementations are allowed to store them as raw binary strings if",
"# the translation to UTF-8 fails.",
"match",
"=",
"re",
".",
"search",
"(",
"br\"\\d+ hdrcharset=([^\\n]+)\\n\"",
",",
"buf",
")",
"if",
"match",
"is",
"not",
"None",
":",
"pax_headers",
"[",
"\"hdrcharset\"",
"]",
"=",
"match",
".",
"group",
"(",
"1",
")",
".",
"decode",
"(",
"\"utf8\"",
")",
"# For the time being, we don't care about anything other than \"BINARY\".",
"# The only other value that is currently allowed by the standard is",
"# \"ISO-IR 10646 2000 UTF-8\" in other words UTF-8.",
"hdrcharset",
"=",
"pax_headers",
".",
"get",
"(",
"\"hdrcharset\"",
")",
"if",
"hdrcharset",
"==",
"\"BINARY\"",
":",
"encoding",
"=",
"tarfile",
".",
"encoding",
"else",
":",
"encoding",
"=",
"\"utf8\"",
"# Parse pax header information. A record looks like that:",
"# \"%d %s=%s\\n\" % (length, keyword, value). length is the size",
"# of the complete record including the length field itself and",
"# the newline. keyword and value are both UTF-8 encoded strings.",
"regex",
"=",
"re",
".",
"compile",
"(",
"br\"(\\d+) ([^=]+)=\"",
")",
"pos",
"=",
"0",
"while",
"True",
":",
"match",
"=",
"regex",
".",
"match",
"(",
"buf",
",",
"pos",
")",
"if",
"not",
"match",
":",
"break",
"length",
",",
"keyword",
"=",
"match",
".",
"groups",
"(",
")",
"length",
"=",
"int",
"(",
"length",
")",
"value",
"=",
"buf",
"[",
"match",
".",
"end",
"(",
"2",
")",
"+",
"1",
":",
"match",
".",
"start",
"(",
"1",
")",
"+",
"length",
"-",
"1",
"]",
"# Normally, we could just use \"utf8\" as the encoding and \"strict\"",
"# as the error handler, but we better not take the risk. For",
"# example, GNU tar <= 1.23 is known to store filenames it cannot",
"# translate to UTF-8 as raw strings (unfortunately without a",
"# hdrcharset=BINARY header).",
"# We first try the strict standard encoding, and if that fails we",
"# fall back on the user's encoding and error handler.",
"keyword",
"=",
"self",
".",
"_decode_pax_field",
"(",
"keyword",
",",
"\"utf8\"",
",",
"\"utf8\"",
",",
"tarfile",
".",
"errors",
")",
"if",
"keyword",
"in",
"PAX_NAME_FIELDS",
":",
"value",
"=",
"self",
".",
"_decode_pax_field",
"(",
"value",
",",
"encoding",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"else",
":",
"value",
"=",
"self",
".",
"_decode_pax_field",
"(",
"value",
",",
"\"utf8\"",
",",
"\"utf8\"",
",",
"tarfile",
".",
"errors",
")",
"pax_headers",
"[",
"keyword",
"]",
"=",
"value",
"pos",
"+=",
"length",
"# Fetch the next header.",
"try",
":",
"next",
"=",
"self",
".",
"fromtarfile",
"(",
"tarfile",
")",
"except",
"HeaderError",
":",
"raise",
"SubsequentHeaderError",
"(",
"\"missing or bad subsequent header\"",
")",
"# Process GNU sparse information.",
"if",
"\"GNU.sparse.map\"",
"in",
"pax_headers",
":",
"# GNU extended sparse format version 0.1.",
"self",
".",
"_proc_gnusparse_01",
"(",
"next",
",",
"pax_headers",
")",
"elif",
"\"GNU.sparse.size\"",
"in",
"pax_headers",
":",
"# GNU extended sparse format version 0.0.",
"self",
".",
"_proc_gnusparse_00",
"(",
"next",
",",
"pax_headers",
",",
"buf",
")",
"elif",
"pax_headers",
".",
"get",
"(",
"\"GNU.sparse.major\"",
")",
"==",
"\"1\"",
"and",
"pax_headers",
".",
"get",
"(",
"\"GNU.sparse.minor\"",
")",
"==",
"\"0\"",
":",
"# GNU extended sparse format version 1.0.",
"self",
".",
"_proc_gnusparse_10",
"(",
"next",
",",
"pax_headers",
",",
"tarfile",
")",
"if",
"self",
".",
"type",
"in",
"(",
"XHDTYPE",
",",
"SOLARIS_XHDTYPE",
")",
":",
"# Patch the TarInfo object with the extended header info.",
"next",
".",
"_apply_pax_info",
"(",
"pax_headers",
",",
"tarfile",
".",
"encoding",
",",
"tarfile",
".",
"errors",
")",
"next",
".",
"offset",
"=",
"self",
".",
"offset",
"if",
"\"size\"",
"in",
"pax_headers",
":",
"# If the extended header replaces the size field,",
"# we need to recalculate the offset where the next",
"# header starts.",
"offset",
"=",
"next",
".",
"offset_data",
"if",
"next",
".",
"isreg",
"(",
")",
"or",
"next",
".",
"type",
"not",
"in",
"SUPPORTED_TYPES",
":",
"offset",
"+=",
"next",
".",
"_block",
"(",
"next",
".",
"size",
")",
"tarfile",
".",
"offset",
"=",
"offset",
"return",
"next"
] |
Process an extended or global header as described in
POSIX.1-2008.
|
[
"Process",
"an",
"extended",
"or",
"global",
"header",
"as",
"described",
"in",
"POSIX",
".",
"1",
"-",
"2008",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1383-L1483
|
train
|
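The record grammar _proc_pax loops over, "%d %s=%s\n" where the leading decimal counts the whole record including the length field itself and the trailing newline, can be exercised outside the class. A minimal standalone sketch, assuming a well-formed, fully UTF-8 buffer (the real method layers the strict/fallback decoding of _decode_pax_field on top):

import re

def parse_pax_records(buf):
    """Parse pax records of the form b'%d %s=%s\\n' from buf."""
    regex = re.compile(br"(\d+) ([^=]+)=")
    headers = {}
    pos = 0
    while True:
        match = regex.match(buf, pos)
        if not match:
            break
        length = int(match.group(1))
        keyword = match.group(2).decode("utf-8")
        # The value runs from just past '=' to just before the newline.
        headers[keyword] = buf[match.end(2) + 1:match.start(1) + length - 1].decode("utf-8")
        pos += length
    return headers

# "12 path=foo\n" is exactly 12 bytes, so the record is self-describing.
buf = b"12 path=foo\n20 mtime=1234567890\n"
print(parse_pax_records(buf))  # {'path': 'foo', 'mtime': '1234567890'}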
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._proc_gnusparse_00
|
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
|
python
|
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
|
[
"def",
"_proc_gnusparse_00",
"(",
"self",
",",
"next",
",",
"pax_headers",
",",
"buf",
")",
":",
"offsets",
"=",
"[",
"]",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"br\"\\d+ GNU.sparse.offset=(\\d+)\\n\"",
",",
"buf",
")",
":",
"offsets",
".",
"append",
"(",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"numbytes",
"=",
"[",
"]",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"br\"\\d+ GNU.sparse.numbytes=(\\d+)\\n\"",
",",
"buf",
")",
":",
"numbytes",
".",
"append",
"(",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"next",
".",
"sparse",
"=",
"list",
"(",
"zip",
"(",
"offsets",
",",
"numbytes",
")",
")"
] |
Process a GNU tar extended sparse header, version 0.0.
|
[
"Process",
"a",
"GNU",
"tar",
"extended",
"sparse",
"header",
"version",
"0",
".",
"0",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1485-L1494
|
train
|
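Because the 0.0 format repeats the GNU.sparse.offset and GNU.sparse.numbytes keywords, a plain dict of pax headers would keep only the last pair; that is why the method re-scans the raw buffer instead. A quick standalone check of that scan (buffer contents made up for illustration):

import re

buf = (b"26 GNU.sparse.offset=1024\n"
       b"25 GNU.sparse.numbytes=7\n"
       b"26 GNU.sparse.offset=4096\n"
       b"27 GNU.sparse.numbytes=512\n")

offsets = [int(m.group(1))
           for m in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf)]
numbytes = [int(m.group(1))
            for m in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf)]
print(list(zip(offsets, numbytes)))  # [(1024, 7), (4096, 512)]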
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._proc_gnusparse_01
|
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
|
python
|
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
|
[
"def",
"_proc_gnusparse_01",
"(",
"self",
",",
"next",
",",
"pax_headers",
")",
":",
"sparse",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"pax_headers",
"[",
"\"GNU.sparse.map\"",
"]",
".",
"split",
"(",
"\",\"",
")",
"]",
"next",
".",
"sparse",
"=",
"list",
"(",
"zip",
"(",
"sparse",
"[",
":",
":",
"2",
"]",
",",
"sparse",
"[",
"1",
":",
":",
"2",
"]",
")",
")"
] |
Process a GNU tar extended sparse header, version 0.1.
|
[
"Process",
"a",
"GNU",
"tar",
"extended",
"sparse",
"header",
"version",
"0",
".",
"1",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1496-L1500
|
train
|
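Version 0.1 avoids the repeated-keyword problem by packing the whole map into one comma-separated GNU.sparse.map value, alternating offsets and byte counts. The pairing step in isolation:

pax_headers = {"GNU.sparse.map": "0,512,4096,1024"}

sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
# Even indices are offsets, odd indices the byte counts that follow them.
print(list(zip(sparse[::2], sparse[1::2])))  # [(0, 512), (4096, 1024)]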
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._proc_gnusparse_10
|
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
|
python
|
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
|
[
"def",
"_proc_gnusparse_10",
"(",
"self",
",",
"next",
",",
"pax_headers",
",",
"tarfile",
")",
":",
"fields",
"=",
"None",
"sparse",
"=",
"[",
"]",
"buf",
"=",
"tarfile",
".",
"fileobj",
".",
"read",
"(",
"BLOCKSIZE",
")",
"fields",
",",
"buf",
"=",
"buf",
".",
"split",
"(",
"b\"\\n\"",
",",
"1",
")",
"fields",
"=",
"int",
"(",
"fields",
")",
"while",
"len",
"(",
"sparse",
")",
"<",
"fields",
"*",
"2",
":",
"if",
"b\"\\n\"",
"not",
"in",
"buf",
":",
"buf",
"+=",
"tarfile",
".",
"fileobj",
".",
"read",
"(",
"BLOCKSIZE",
")",
"number",
",",
"buf",
"=",
"buf",
".",
"split",
"(",
"b\"\\n\"",
",",
"1",
")",
"sparse",
".",
"append",
"(",
"int",
"(",
"number",
")",
")",
"next",
".",
"offset_data",
"=",
"tarfile",
".",
"fileobj",
".",
"tell",
"(",
")",
"next",
".",
"sparse",
"=",
"list",
"(",
"zip",
"(",
"sparse",
"[",
":",
":",
"2",
"]",
",",
"sparse",
"[",
"1",
":",
":",
"2",
"]",
")",
")"
] |
Process a GNU tar extended sparse header, version 1.0.
|
[
"Process",
"a",
"GNU",
"tar",
"extended",
"sparse",
"header",
"version",
"1",
".",
"0",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1502-L1516
|
train
|
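Version 1.0 moves the map out of the pax header entirely: it sits in the data blocks before the member data, as newline-separated decimals starting with the entry count, padded to full 512-byte blocks. A self-contained sketch of that decode, using io.BytesIO in place of the archive file object:

import io

BLOCKSIZE = 512

def read_sparse_map_10(fileobj):
    """Decode a GNU sparse 1.0 map from block-aligned storage."""
    buf = fileobj.read(BLOCKSIZE)
    fields, buf = buf.split(b"\n", 1)
    fields = int(fields)          # number of (offset, numbytes) entries
    sparse = []
    while len(sparse) < fields * 2:
        if b"\n" not in buf:      # the map may span several blocks
            buf += fileobj.read(BLOCKSIZE)
        number, buf = buf.split(b"\n", 1)
        sparse.append(int(number))
    return list(zip(sparse[::2], sparse[1::2]))

payload = b"2\n0\n512\n4096\n1024\n"
fileobj = io.BytesIO(payload.ljust(BLOCKSIZE, b"\0"))  # NUL-pad to one block
print(read_sparse_map_10(fileobj))  # [(0, 512), (4096, 1024)]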
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._apply_pax_info
|
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
|
python
|
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
|
[
"def",
"_apply_pax_info",
"(",
"self",
",",
"pax_headers",
",",
"encoding",
",",
"errors",
")",
":",
"for",
"keyword",
",",
"value",
"in",
"pax_headers",
".",
"items",
"(",
")",
":",
"if",
"keyword",
"==",
"\"GNU.sparse.name\"",
":",
"setattr",
"(",
"self",
",",
"\"path\"",
",",
"value",
")",
"elif",
"keyword",
"==",
"\"GNU.sparse.size\"",
":",
"setattr",
"(",
"self",
",",
"\"size\"",
",",
"int",
"(",
"value",
")",
")",
"elif",
"keyword",
"==",
"\"GNU.sparse.realsize\"",
":",
"setattr",
"(",
"self",
",",
"\"size\"",
",",
"int",
"(",
"value",
")",
")",
"elif",
"keyword",
"in",
"PAX_FIELDS",
":",
"if",
"keyword",
"in",
"PAX_NUMBER_FIELDS",
":",
"try",
":",
"value",
"=",
"PAX_NUMBER_FIELDS",
"[",
"keyword",
"]",
"(",
"value",
")",
"except",
"ValueError",
":",
"value",
"=",
"0",
"if",
"keyword",
"==",
"\"path\"",
":",
"value",
"=",
"value",
".",
"rstrip",
"(",
"\"/\"",
")",
"setattr",
"(",
"self",
",",
"keyword",
",",
"value",
")",
"self",
".",
"pax_headers",
"=",
"pax_headers",
".",
"copy",
"(",
")"
] |
Replace fields with supplemental information from a previous
pax extended or global header.
|
[
"Replace",
"fields",
"with",
"supplemental",
"information",
"from",
"a",
"previous",
"pax",
"extended",
"or",
"global",
"header",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1518-L1539
|
train
|
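Everything arrives from the pax record parser as text, so numeric fields are coerced through PAX_NUMBER_FIELDS, falling back to 0 on malformed input, and a path value loses any trailing slash. A reduced sketch of that coercion (the converter table below abbreviates the module's real PAX_NUMBER_FIELDS):

# Abbreviated converter table; the real module also maps atime and ctime.
PAX_NUMBER_FIELDS = {"size": int, "uid": int, "gid": int, "mtime": float}

def coerce(keyword, value):
    if keyword in PAX_NUMBER_FIELDS:
        try:
            value = PAX_NUMBER_FIELDS[keyword](value)
        except ValueError:
            value = 0              # bad numbers degrade to 0, not an error
    if keyword == "path":
        value = value.rstrip("/")
    return value

print(coerce("size", "1048576"))     # 1048576
print(coerce("size", "not-a-size"))  # 0
print(coerce("path", "dir/sub/"))    # 'dir/sub'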
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._decode_pax_field
|
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
|
python
|
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
|
[
"def",
"_decode_pax_field",
"(",
"self",
",",
"value",
",",
"encoding",
",",
"fallback_encoding",
",",
"fallback_errors",
")",
":",
"try",
":",
"return",
"value",
".",
"decode",
"(",
"encoding",
",",
"\"strict\"",
")",
"except",
"UnicodeDecodeError",
":",
"return",
"value",
".",
"decode",
"(",
"fallback_encoding",
",",
"fallback_errors",
")"
] |
Decode a single field from a pax record.
|
[
"Decode",
"a",
"single",
"field",
"from",
"a",
"pax",
"record",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1541-L1547
|
train
|
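This two-step decode is the entire defense against GNU tar <= 1.23 writing undeclared raw bytes: try the declared encoding strictly, then retry with the user's encoding and error handler. Exercised on its own:

def decode_pax_field(value, encoding, fallback_encoding, fallback_errors):
    try:
        return value.decode(encoding, "strict")
    except UnicodeDecodeError:
        return value.decode(fallback_encoding, fallback_errors)

print(decode_pax_field(b"caf\xc3\xa9", "utf-8", "latin-1", "replace"))  # 'café'
# Invalid UTF-8 falls through to the latin-1 fallback instead of raising.
print(decode_pax_field(b"caf\xe9", "utf-8", "latin-1", "replace"))      # 'café'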
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarInfo._block
|
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
|
python
|
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
|
[
"def",
"_block",
"(",
"self",
",",
"count",
")",
":",
"blocks",
",",
"remainder",
"=",
"divmod",
"(",
"count",
",",
"BLOCKSIZE",
")",
"if",
"remainder",
":",
"blocks",
"+=",
"1",
"return",
"blocks",
"*",
"BLOCKSIZE"
] |
Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
|
[
"Round",
"up",
"a",
"byte",
"count",
"by",
"BLOCKSIZE",
"and",
"return",
"it",
"e",
".",
"g",
".",
"_block",
"(",
"834",
")",
"=",
">",
"1024",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1549-L1556
|
train
|
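This rounding is what lets the reader skip a member's data region: a file of any size occupies a whole number of 512-byte blocks on disk. The arithmetic checks out quickly:

BLOCKSIZE = 512

def block(count):
    blocks, remainder = divmod(count, BLOCKSIZE)
    if remainder:
        blocks += 1
    return blocks * BLOCKSIZE

assert block(0) == 0        # empty members consume no data blocks
assert block(834) == 1024   # the docstring example: two blocks
assert block(512) == 512    # exact multiples are left unchanged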
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarFile.open
|
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
|
python
|
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
|
[
"def",
"open",
"(",
"cls",
",",
"name",
"=",
"None",
",",
"mode",
"=",
"\"r\"",
",",
"fileobj",
"=",
"None",
",",
"bufsize",
"=",
"RECORDSIZE",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"name",
"and",
"not",
"fileobj",
":",
"raise",
"ValueError",
"(",
"\"nothing to open\"",
")",
"if",
"mode",
"in",
"(",
"\"r\"",
",",
"\"r:*\"",
")",
":",
"# Find out which *open() is appropriate for opening the file.",
"for",
"comptype",
"in",
"cls",
".",
"OPEN_METH",
":",
"func",
"=",
"getattr",
"(",
"cls",
",",
"cls",
".",
"OPEN_METH",
"[",
"comptype",
"]",
")",
"if",
"fileobj",
"is",
"not",
"None",
":",
"saved_pos",
"=",
"fileobj",
".",
"tell",
"(",
")",
"try",
":",
"return",
"func",
"(",
"name",
",",
"\"r\"",
",",
"fileobj",
",",
"*",
"*",
"kwargs",
")",
"except",
"(",
"ReadError",
",",
"CompressionError",
")",
"as",
"e",
":",
"if",
"fileobj",
"is",
"not",
"None",
":",
"fileobj",
".",
"seek",
"(",
"saved_pos",
")",
"continue",
"raise",
"ReadError",
"(",
"\"file could not be opened successfully\"",
")",
"elif",
"\":\"",
"in",
"mode",
":",
"filemode",
",",
"comptype",
"=",
"mode",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"filemode",
"=",
"filemode",
"or",
"\"r\"",
"comptype",
"=",
"comptype",
"or",
"\"tar\"",
"# Select the *open() function according to",
"# given compression.",
"if",
"comptype",
"in",
"cls",
".",
"OPEN_METH",
":",
"func",
"=",
"getattr",
"(",
"cls",
",",
"cls",
".",
"OPEN_METH",
"[",
"comptype",
"]",
")",
"else",
":",
"raise",
"CompressionError",
"(",
"\"unknown compression type %r\"",
"%",
"comptype",
")",
"return",
"func",
"(",
"name",
",",
"filemode",
",",
"fileobj",
",",
"*",
"*",
"kwargs",
")",
"elif",
"\"|\"",
"in",
"mode",
":",
"filemode",
",",
"comptype",
"=",
"mode",
".",
"split",
"(",
"\"|\"",
",",
"1",
")",
"filemode",
"=",
"filemode",
"or",
"\"r\"",
"comptype",
"=",
"comptype",
"or",
"\"tar\"",
"if",
"filemode",
"not",
"in",
"\"rw\"",
":",
"raise",
"ValueError",
"(",
"\"mode must be 'r' or 'w'\"",
")",
"stream",
"=",
"_Stream",
"(",
"name",
",",
"filemode",
",",
"comptype",
",",
"fileobj",
",",
"bufsize",
")",
"try",
":",
"t",
"=",
"cls",
"(",
"name",
",",
"filemode",
",",
"stream",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"stream",
".",
"close",
"(",
")",
"raise",
"t",
".",
"_extfileobj",
"=",
"False",
"return",
"t",
"elif",
"mode",
"in",
"\"aw\"",
":",
"return",
"cls",
".",
"taropen",
"(",
"name",
",",
"mode",
",",
"fileobj",
",",
"*",
"*",
"kwargs",
")",
"raise",
"ValueError",
"(",
"\"undiscernible mode\"",
")"
] |
Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
|
[
"Open",
"a",
"tar",
"archive",
"for",
"reading",
"writing",
"or",
"appending",
".",
"Return",
"an",
"appropriate",
"TarFile",
"class",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1714-L1787
|
train
|
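Since this module backports the standard library's tarfile, the mode grammar above matches what the stdlib accepts, and the dispatch can be tried against it directly. A small in-memory round trip, writing with 'w:gz' and reading back with the transparent 'r:*':

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w:gz") as tf:   # dispatches to gzopen
    data = b"hello"
    info = tarfile.TarInfo(name="hello.txt")
    info.size = len(data)
    tf.addfile(info, io.BytesIO(data))

buf.seek(0)
with tarfile.open(fileobj=buf, mode="r:*") as tf:    # tries each OPEN_METH
    print(tf.getnames())  # ['hello.txt']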
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarFile.taropen
|
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
|
python
|
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
|
[
"def",
"taropen",
"(",
"cls",
",",
"name",
",",
"mode",
"=",
"\"r\"",
",",
"fileobj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"mode",
")",
">",
"1",
"or",
"mode",
"not",
"in",
"\"raw\"",
":",
"raise",
"ValueError",
"(",
"\"mode must be 'r', 'a' or 'w'\"",
")",
"return",
"cls",
"(",
"name",
",",
"mode",
",",
"fileobj",
",",
"*",
"*",
"kwargs",
")"
] |
Open uncompressed tar archive name for reading or writing.
|
[
"Open",
"uncompressed",
"tar",
"archive",
"name",
"for",
"reading",
"or",
"writing",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1790-L1795
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarFile.gzopen
|
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
|
python
|
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
|
[
"def",
"gzopen",
"(",
"cls",
",",
"name",
",",
"mode",
"=",
"\"r\"",
",",
"fileobj",
"=",
"None",
",",
"compresslevel",
"=",
"9",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"mode",
")",
">",
"1",
"or",
"mode",
"not",
"in",
"\"rw\"",
":",
"raise",
"ValueError",
"(",
"\"mode must be 'r' or 'w'\"",
")",
"try",
":",
"import",
"gzip",
"gzip",
".",
"GzipFile",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
":",
"raise",
"CompressionError",
"(",
"\"gzip module is not available\"",
")",
"extfileobj",
"=",
"fileobj",
"is",
"not",
"None",
"try",
":",
"fileobj",
"=",
"gzip",
".",
"GzipFile",
"(",
"name",
",",
"mode",
"+",
"\"b\"",
",",
"compresslevel",
",",
"fileobj",
")",
"t",
"=",
"cls",
".",
"taropen",
"(",
"name",
",",
"mode",
",",
"fileobj",
",",
"*",
"*",
"kwargs",
")",
"except",
"IOError",
":",
"if",
"not",
"extfileobj",
"and",
"fileobj",
"is",
"not",
"None",
":",
"fileobj",
".",
"close",
"(",
")",
"if",
"fileobj",
"is",
"None",
":",
"raise",
"raise",
"ReadError",
"(",
"\"not a gzip file\"",
")",
"except",
":",
"if",
"not",
"extfileobj",
"and",
"fileobj",
"is",
"not",
"None",
":",
"fileobj",
".",
"close",
"(",
")",
"raise",
"t",
".",
"_extfileobj",
"=",
"extfileobj",
"return",
"t"
] |
Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
|
[
"Open",
"gzip",
"compressed",
"tar",
"archive",
"name",
"for",
"reading",
"or",
"writing",
".",
"Appending",
"is",
"not",
"allowed",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1798-L1826
|
train
|
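The ReadError("not a gzip file") branch fires when the wrapped stream turns out not to be gzip data at all, which is also what lets open()'s 'r:*' loop move on to the next OPEN_METH candidate. The failure is easy to provoke with the stdlib:

import io
import tarfile

raw = io.BytesIO(b"definitely not gzip data")
try:
    tarfile.open(fileobj=raw, mode="r:gz")
except tarfile.ReadError as exc:
    print(exc)  # not a gzip file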
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarFile.bz2open
|
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
|
python
|
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
|
[
"def",
"bz2open",
"(",
"cls",
",",
"name",
",",
"mode",
"=",
"\"r\"",
",",
"fileobj",
"=",
"None",
",",
"compresslevel",
"=",
"9",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"mode",
")",
">",
"1",
"or",
"mode",
"not",
"in",
"\"rw\"",
":",
"raise",
"ValueError",
"(",
"\"mode must be 'r' or 'w'.\"",
")",
"try",
":",
"import",
"bz2",
"except",
"ImportError",
":",
"raise",
"CompressionError",
"(",
"\"bz2 module is not available\"",
")",
"if",
"fileobj",
"is",
"not",
"None",
":",
"fileobj",
"=",
"_BZ2Proxy",
"(",
"fileobj",
",",
"mode",
")",
"else",
":",
"fileobj",
"=",
"bz2",
".",
"BZ2File",
"(",
"name",
",",
"mode",
",",
"compresslevel",
"=",
"compresslevel",
")",
"try",
":",
"t",
"=",
"cls",
".",
"taropen",
"(",
"name",
",",
"mode",
",",
"fileobj",
",",
"*",
"*",
"kwargs",
")",
"except",
"(",
"IOError",
",",
"EOFError",
")",
":",
"fileobj",
".",
"close",
"(",
")",
"raise",
"ReadError",
"(",
"\"not a bzip2 file\"",
")",
"t",
".",
"_extfileobj",
"=",
"False",
"return",
"t"
] |
Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
|
[
"Open",
"bzip2",
"compressed",
"tar",
"archive",
"name",
"for",
"reading",
"or",
"writing",
".",
"Appending",
"is",
"not",
"allowed",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1829-L1852
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarFile.close
|
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
|
python
|
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"closed",
":",
"return",
"if",
"self",
".",
"mode",
"in",
"\"aw\"",
":",
"self",
".",
"fileobj",
".",
"write",
"(",
"NUL",
"*",
"(",
"BLOCKSIZE",
"*",
"2",
")",
")",
"self",
".",
"offset",
"+=",
"(",
"BLOCKSIZE",
"*",
"2",
")",
"# fill up the end with zero-blocks",
"# (like option -b20 for tar does)",
"blocks",
",",
"remainder",
"=",
"divmod",
"(",
"self",
".",
"offset",
",",
"RECORDSIZE",
")",
"if",
"remainder",
">",
"0",
":",
"self",
".",
"fileobj",
".",
"write",
"(",
"NUL",
"*",
"(",
"RECORDSIZE",
"-",
"remainder",
")",
")",
"if",
"not",
"self",
".",
"_extfileobj",
":",
"self",
".",
"fileobj",
".",
"close",
"(",
")",
"self",
".",
"closed",
"=",
"True"
] |
Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
|
[
"Close",
"the",
"TarFile",
".",
"In",
"write",
"-",
"mode",
"two",
"finishing",
"zero",
"blocks",
"are",
"appended",
"to",
"the",
"archive",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1864-L1882
|
train
|
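On top of the two zero end-of-archive blocks, close() pads the file to a multiple of RECORDSIZE (20 blocks, 10240 bytes), mimicking tar's default -b20 blocking factor. The padding computation in isolation:

BLOCKSIZE = 512
RECORDSIZE = BLOCKSIZE * 20   # 10240 bytes

def final_padding(offset):
    """NUL bytes needed so the archive ends on a record boundary."""
    blocks, remainder = divmod(offset, RECORDSIZE)
    return RECORDSIZE - remainder if remainder else 0

# One header block, one data block, plus the two zero end blocks:
offset = BLOCKSIZE + BLOCKSIZE + 2 * BLOCKSIZE
print(final_padding(offset))  # 8192, bringing the total to 10240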
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarFile.getmember
|
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
|
python
|
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
|
[
"def",
"getmember",
"(",
"self",
",",
"name",
")",
":",
"tarinfo",
"=",
"self",
".",
"_getmember",
"(",
"name",
")",
"if",
"tarinfo",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"\"filename %r not found\"",
"%",
"name",
")",
"return",
"tarinfo"
] |
Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
|
[
"Return",
"a",
"TarInfo",
"object",
"for",
"member",
"name",
".",
"If",
"name",
"can",
"not",
"be",
"found",
"in",
"the",
"archive",
"KeyError",
"is",
"raised",
".",
"If",
"a",
"member",
"occurs",
"more",
"than",
"once",
"in",
"the",
"archive",
"its",
"last",
"occurrence",
"is",
"assumed",
"to",
"be",
"the",
"most",
"up",
"-",
"to",
"-",
"date",
"version",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1884-L1893
|
train
|
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarFile.getmembers
|
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
|
python
|
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
|
[
"def",
"getmembers",
"(",
"self",
")",
":",
"self",
".",
"_check",
"(",
")",
"if",
"not",
"self",
".",
"_loaded",
":",
"# if we want to obtain a list of",
"self",
".",
"_load",
"(",
")",
"# all members, we first have to",
"# scan the whole archive.",
"return",
"self",
".",
"members"
] |
Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
|
[
"Return",
"the",
"members",
"of",
"the",
"archive",
"as",
"a",
"list",
"of",
"TarInfo",
"objects",
".",
"The",
"list",
"has",
"the",
"same",
"order",
"as",
"the",
"members",
"in",
"the",
"archive",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1895-L1903
|
train
|
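The last-occurrence rule in getmember matters whenever an archive stores the same name twice, as append mode happily allows; getmembers still lists every entry in archive order. A quick stdlib demonstration:

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tf:
    for payload in (b"v1", b"v2"):
        info = tarfile.TarInfo(name="dup.txt")
        info.size = len(payload)
        tf.addfile(info, io.BytesIO(payload))

buf.seek(0)
with tarfile.open(fileobj=buf, mode="r") as tf:
    print(len(tf.getmembers()))               # 2, both entries listed
    member = tf.getmember("dup.txt")          # the later entry wins
    print(tf.extractfile(member).read())      # b'v2'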
pypa/pipenv
|
pipenv/vendor/distlib/_backport/tarfile.py
|
TarFile.gettarinfo
|
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes.
# Absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific to the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
|
python
|
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes.
# Absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific to the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
|
[
"def",
"gettarinfo",
"(",
"self",
",",
"name",
"=",
"None",
",",
"arcname",
"=",
"None",
",",
"fileobj",
"=",
"None",
")",
":",
"self",
".",
"_check",
"(",
"\"aw\"",
")",
"# When fileobj is given, replace name by",
"# fileobj's real name.",
"if",
"fileobj",
"is",
"not",
"None",
":",
"name",
"=",
"fileobj",
".",
"name",
"# Building the name of the member in the archive.",
"# Backward slashes are converted to forward slashes,",
"# Absolute paths are turned to relative paths.",
"if",
"arcname",
"is",
"None",
":",
"arcname",
"=",
"name",
"drv",
",",
"arcname",
"=",
"os",
".",
"path",
".",
"splitdrive",
"(",
"arcname",
")",
"arcname",
"=",
"arcname",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"\"/\"",
")",
"arcname",
"=",
"arcname",
".",
"lstrip",
"(",
"\"/\"",
")",
"# Now, fill the TarInfo object with",
"# information specific for the file.",
"tarinfo",
"=",
"self",
".",
"tarinfo",
"(",
")",
"tarinfo",
".",
"tarfile",
"=",
"self",
"# Use os.stat or os.lstat, depending on platform",
"# and if symlinks shall be resolved.",
"if",
"fileobj",
"is",
"None",
":",
"if",
"hasattr",
"(",
"os",
",",
"\"lstat\"",
")",
"and",
"not",
"self",
".",
"dereference",
":",
"statres",
"=",
"os",
".",
"lstat",
"(",
"name",
")",
"else",
":",
"statres",
"=",
"os",
".",
"stat",
"(",
"name",
")",
"else",
":",
"statres",
"=",
"os",
".",
"fstat",
"(",
"fileobj",
".",
"fileno",
"(",
")",
")",
"linkname",
"=",
"\"\"",
"stmd",
"=",
"statres",
".",
"st_mode",
"if",
"stat",
".",
"S_ISREG",
"(",
"stmd",
")",
":",
"inode",
"=",
"(",
"statres",
".",
"st_ino",
",",
"statres",
".",
"st_dev",
")",
"if",
"not",
"self",
".",
"dereference",
"and",
"statres",
".",
"st_nlink",
">",
"1",
"and",
"inode",
"in",
"self",
".",
"inodes",
"and",
"arcname",
"!=",
"self",
".",
"inodes",
"[",
"inode",
"]",
":",
"# Is it a hardlink to an already",
"# archived file?",
"type",
"=",
"LNKTYPE",
"linkname",
"=",
"self",
".",
"inodes",
"[",
"inode",
"]",
"else",
":",
"# The inode is added only if its valid.",
"# For win32 it is always 0.",
"type",
"=",
"REGTYPE",
"if",
"inode",
"[",
"0",
"]",
":",
"self",
".",
"inodes",
"[",
"inode",
"]",
"=",
"arcname",
"elif",
"stat",
".",
"S_ISDIR",
"(",
"stmd",
")",
":",
"type",
"=",
"DIRTYPE",
"elif",
"stat",
".",
"S_ISFIFO",
"(",
"stmd",
")",
":",
"type",
"=",
"FIFOTYPE",
"elif",
"stat",
".",
"S_ISLNK",
"(",
"stmd",
")",
":",
"type",
"=",
"SYMTYPE",
"linkname",
"=",
"os",
".",
"readlink",
"(",
"name",
")",
"elif",
"stat",
".",
"S_ISCHR",
"(",
"stmd",
")",
":",
"type",
"=",
"CHRTYPE",
"elif",
"stat",
".",
"S_ISBLK",
"(",
"stmd",
")",
":",
"type",
"=",
"BLKTYPE",
"else",
":",
"return",
"None",
"# Fill the TarInfo object with all",
"# information we can get.",
"tarinfo",
".",
"name",
"=",
"arcname",
"tarinfo",
".",
"mode",
"=",
"stmd",
"tarinfo",
".",
"uid",
"=",
"statres",
".",
"st_uid",
"tarinfo",
".",
"gid",
"=",
"statres",
".",
"st_gid",
"if",
"type",
"==",
"REGTYPE",
":",
"tarinfo",
".",
"size",
"=",
"statres",
".",
"st_size",
"else",
":",
"tarinfo",
".",
"size",
"=",
"0",
"tarinfo",
".",
"mtime",
"=",
"statres",
".",
"st_mtime",
"tarinfo",
".",
"type",
"=",
"type",
"tarinfo",
".",
"linkname",
"=",
"linkname",
"if",
"pwd",
":",
"try",
":",
"tarinfo",
".",
"uname",
"=",
"pwd",
".",
"getpwuid",
"(",
"tarinfo",
".",
"uid",
")",
"[",
"0",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"grp",
":",
"try",
":",
"tarinfo",
".",
"gname",
"=",
"grp",
".",
"getgrgid",
"(",
"tarinfo",
".",
"gid",
")",
"[",
"0",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"type",
"in",
"(",
"CHRTYPE",
",",
"BLKTYPE",
")",
":",
"if",
"hasattr",
"(",
"os",
",",
"\"major\"",
")",
"and",
"hasattr",
"(",
"os",
",",
"\"minor\"",
")",
":",
"tarinfo",
".",
"devmajor",
"=",
"os",
".",
"major",
"(",
"statres",
".",
"st_rdev",
")",
"tarinfo",
".",
"devminor",
"=",
"os",
".",
"minor",
"(",
"statres",
".",
"st_rdev",
")",
"return",
"tarinfo"
] |
Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
|
[
"Create",
"a",
"TarInfo",
"object",
"for",
"either",
"the",
"file",
"name",
"or",
"the",
"file",
"object",
"fileobj",
"(",
"using",
"os",
".",
"fstat",
"on",
"its",
"file",
"descriptor",
")",
".",
"You",
"can",
"modify",
"some",
"of",
"the",
"TarInfo",
"s",
"attributes",
"before",
"you",
"add",
"it",
"using",
"addfile",
"()",
".",
"If",
"given",
"arcname",
"specifies",
"an",
"alternative",
"name",
"for",
"the",
"file",
"in",
"the",
"archive",
"."
] |
cae8d76c210b9777e90aab76e9c4b0e53bb19cde
|
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1911-L2007
|
train
|
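gettarinfo() plus addfile() is the low-level pair underneath TarFile.add(): build a TarInfo from a stat, adjust whatever fields you like, then write header and data together. A hedged sketch that strips machine-specific ownership before archiving (temporary file, stdlib tarfile):

import os
import tarfile
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"payload")
    path = f.name

archive = path + ".tar"
with tarfile.open(archive, "w") as tf:
    info = tf.gettarinfo(path, arcname="payload.bin")
    # Edit the TarInfo before addfile(), e.g. to normalize ownership.
    info.uid = info.gid = 0
    info.uname = info.gname = "root"
    with open(path, "rb") as src:
        tf.addfile(info, src)

os.remove(path)
os.remove(archive)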