repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
log2timeline/dfvfs | examples/list_file_entries.py | FileEntryLister.ListFileEntries | def ListFileEntries(self, base_path_specs, output_writer):
"""Lists file entries in the base path specification.
Args:
base_path_specs (list[dfvfs.PathSpec]): source path specification.
output_writer (StdoutWriter): output writer.
"""
for base_path_spec in base_path_specs:
file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
if file_entry is None:
logging.warning(
'Unable to open base path specification:\n{0:s}'.format(
base_path_spec.comparable))
return
self._ListFileEntry(file_system, file_entry, '', output_writer) | python | def ListFileEntries(self, base_path_specs, output_writer):
"""Lists file entries in the base path specification.
Args:
base_path_specs (list[dfvfs.PathSpec]): source path specification.
output_writer (StdoutWriter): output writer.
"""
for base_path_spec in base_path_specs:
file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
if file_entry is None:
logging.warning(
'Unable to open base path specification:\n{0:s}'.format(
base_path_spec.comparable))
return
self._ListFileEntry(file_system, file_entry, '', output_writer) | [
"def",
"ListFileEntries",
"(",
"self",
",",
"base_path_specs",
",",
"output_writer",
")",
":",
"for",
"base_path_spec",
"in",
"base_path_specs",
":",
"file_system",
"=",
"resolver",
".",
"Resolver",
".",
"OpenFileSystem",
"(",
"base_path_spec",
")",
"file_entry",
... | Lists file entries in the base path specification.
Args:
base_path_specs (list[dfvfs.PathSpec]): source path specification.
output_writer (StdoutWriter): output writer. | [
"Lists",
"file",
"entries",
"in",
"the",
"base",
"path",
"specification",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/examples/list_file_entries.py#L60-L76 | train | 208,400 |
log2timeline/dfvfs | examples/list_file_entries.py | FileOutputWriter.WriteFileEntry | def WriteFileEntry(self, path):
"""Writes the file path to file.
Args:
path (str): path of the file.
"""
string = '{0:s}\n'.format(path)
encoded_string = self._EncodeString(string)
self._file_object.write(encoded_string) | python | def WriteFileEntry(self, path):
"""Writes the file path to file.
Args:
path (str): path of the file.
"""
string = '{0:s}\n'.format(path)
encoded_string = self._EncodeString(string)
self._file_object.write(encoded_string) | [
"def",
"WriteFileEntry",
"(",
"self",
",",
"path",
")",
":",
"string",
"=",
"'{0:s}\\n'",
".",
"format",
"(",
"path",
")",
"encoded_string",
"=",
"self",
".",
"_EncodeString",
"(",
"string",
")",
"self",
".",
"_file_object",
".",
"write",
"(",
"encoded_str... | Writes the file path to file.
Args:
path (str): path of the file. | [
"Writes",
"the",
"file",
"path",
"to",
"file",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/examples/list_file_entries.py#L159-L168 | train | 208,401 |
log2timeline/dfvfs | dfvfs/vfs/fake_file_system.py | FakeFileSystem.AddFileEntry | def AddFileEntry(
self, path, file_entry_type=definitions.FILE_ENTRY_TYPE_FILE,
file_data=None, link_data=None):
"""Adds a fake file entry.
Args:
path (str): path of the file entry.
file_entry_type (Optional[str]): type of the file entry object.
file_data (Optional[bytes]): data of the fake file-like object.
link_data (Optional[bytes]): link data of the fake file entry object.
Raises:
KeyError: if the path already exists.
ValueError: if the file data is set but the file entry type is not a file
or if the link data is set but the file entry type is not a link.
"""
if path in self._paths:
raise KeyError('File entry already set for path: {0:s}.'.format(path))
if file_data and file_entry_type != definitions.FILE_ENTRY_TYPE_FILE:
raise ValueError('File data set for non-file file entry type.')
if link_data and file_entry_type != definitions.FILE_ENTRY_TYPE_LINK:
raise ValueError('Link data set for non-link file entry type.')
if file_data is not None:
path_data = file_data
elif link_data is not None:
path_data = link_data
else:
path_data = None
self._paths[path] = (file_entry_type, path_data) | python | def AddFileEntry(
self, path, file_entry_type=definitions.FILE_ENTRY_TYPE_FILE,
file_data=None, link_data=None):
"""Adds a fake file entry.
Args:
path (str): path of the file entry.
file_entry_type (Optional[str]): type of the file entry object.
file_data (Optional[bytes]): data of the fake file-like object.
link_data (Optional[bytes]): link data of the fake file entry object.
Raises:
KeyError: if the path already exists.
ValueError: if the file data is set but the file entry type is not a file
or if the link data is set but the file entry type is not a link.
"""
if path in self._paths:
raise KeyError('File entry already set for path: {0:s}.'.format(path))
if file_data and file_entry_type != definitions.FILE_ENTRY_TYPE_FILE:
raise ValueError('File data set for non-file file entry type.')
if link_data and file_entry_type != definitions.FILE_ENTRY_TYPE_LINK:
raise ValueError('Link data set for non-link file entry type.')
if file_data is not None:
path_data = file_data
elif link_data is not None:
path_data = link_data
else:
path_data = None
self._paths[path] = (file_entry_type, path_data) | [
"def",
"AddFileEntry",
"(",
"self",
",",
"path",
",",
"file_entry_type",
"=",
"definitions",
".",
"FILE_ENTRY_TYPE_FILE",
",",
"file_data",
"=",
"None",
",",
"link_data",
"=",
"None",
")",
":",
"if",
"path",
"in",
"self",
".",
"_paths",
":",
"raise",
"KeyE... | Adds a fake file entry.
Args:
path (str): path of the file entry.
file_entry_type (Optional[str]): type of the file entry object.
file_data (Optional[bytes]): data of the fake file-like object.
link_data (Optional[bytes]): link data of the fake file entry object.
Raises:
KeyError: if the path already exists.
ValueError: if the file data is set but the file entry type is not a file
or if the link data is set but the file entry type is not a link. | [
"Adds",
"a",
"fake",
"file",
"entry",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/fake_file_system.py#L59-L91 | train | 208,402 |
log2timeline/dfvfs | dfvfs/vfs/fake_file_system.py | FakeFileSystem.GetDataByPath | def GetDataByPath(self, path):
"""Retrieves the data associated to a path.
Args:
path (str): path of the file entry.
Returns:
bytes: data or None if not available.
"""
_, path_data = self._paths.get(path, (None, None))
return path_data | python | def GetDataByPath(self, path):
"""Retrieves the data associated to a path.
Args:
path (str): path of the file entry.
Returns:
bytes: data or None if not available.
"""
_, path_data = self._paths.get(path, (None, None))
return path_data | [
"def",
"GetDataByPath",
"(",
"self",
",",
"path",
")",
":",
"_",
",",
"path_data",
"=",
"self",
".",
"_paths",
".",
"get",
"(",
"path",
",",
"(",
"None",
",",
"None",
")",
")",
"return",
"path_data"
] | Retrieves the data associated to a path.
Args:
path (str): path of the file entry.
Returns:
bytes: data or None if not available. | [
"Retrieves",
"the",
"data",
"associated",
"to",
"a",
"path",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/fake_file_system.py#L116-L126 | train | 208,403 |
log2timeline/dfvfs | dfvfs/vfs/fake_file_system.py | FakeFileSystem.GetFileEntryByPath | def GetFileEntryByPath(self, path):
"""Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available.
"""
if path is None:
return None
file_entry_type, _ = self._paths.get(path, (None, None))
if not file_entry_type:
return None
path_spec = fake_path_spec.FakePathSpec(location=path)
return fake_file_entry.FakeFileEntry(
self._resolver_context, self, path_spec,
file_entry_type=file_entry_type) | python | def GetFileEntryByPath(self, path):
"""Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available.
"""
if path is None:
return None
file_entry_type, _ = self._paths.get(path, (None, None))
if not file_entry_type:
return None
path_spec = fake_path_spec.FakePathSpec(location=path)
return fake_file_entry.FakeFileEntry(
self._resolver_context, self, path_spec,
file_entry_type=file_entry_type) | [
"def",
"GetFileEntryByPath",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
"is",
"None",
":",
"return",
"None",
"file_entry_type",
",",
"_",
"=",
"self",
".",
"_paths",
".",
"get",
"(",
"path",
",",
"(",
"None",
",",
"None",
")",
")",
"if",
"no... | Retrieves a file entry for a path.
Args:
path (str): path of the file entry.
Returns:
FakeFileEntry: a file entry or None if not available. | [
"Retrieves",
"a",
"file",
"entry",
"for",
"a",
"path",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/fake_file_system.py#L128-L147 | train | 208,404 |
log2timeline/dfvfs | dfvfs/vfs/file_system.py | FileSystem.BasenamePath | def BasenamePath(self, path):
"""Determines the basename of the path.
Args:
path (str): path.
Returns:
str: basename of the path.
"""
if path.endswith(self.PATH_SEPARATOR):
path = path[:-1]
_, _, basename = path.rpartition(self.PATH_SEPARATOR)
return basename | python | def BasenamePath(self, path):
"""Determines the basename of the path.
Args:
path (str): path.
Returns:
str: basename of the path.
"""
if path.endswith(self.PATH_SEPARATOR):
path = path[:-1]
_, _, basename = path.rpartition(self.PATH_SEPARATOR)
return basename | [
"def",
"BasenamePath",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
".",
"endswith",
"(",
"self",
".",
"PATH_SEPARATOR",
")",
":",
"path",
"=",
"path",
"[",
":",
"-",
"1",
"]",
"_",
",",
"_",
",",
"basename",
"=",
"path",
".",
"rpartition",
... | Determines the basename of the path.
Args:
path (str): path.
Returns:
str: basename of the path. | [
"Determines",
"the",
"basename",
"of",
"the",
"path",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/file_system.py#L68-L80 | train | 208,405 |
log2timeline/dfvfs | dfvfs/vfs/file_system.py | FileSystem.DirnamePath | def DirnamePath(self, path):
"""Determines the directory name of the path.
The file system root is represented by an empty string.
Args:
path (str): path.
Returns:
str: directory name of the path or None.
"""
if path.endswith(self.PATH_SEPARATOR):
path = path[:-1]
if not path:
return None
dirname, _, _ = path.rpartition(self.PATH_SEPARATOR)
return dirname | python | def DirnamePath(self, path):
"""Determines the directory name of the path.
The file system root is represented by an empty string.
Args:
path (str): path.
Returns:
str: directory name of the path or None.
"""
if path.endswith(self.PATH_SEPARATOR):
path = path[:-1]
if not path:
return None
dirname, _, _ = path.rpartition(self.PATH_SEPARATOR)
return dirname | [
"def",
"DirnamePath",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
".",
"endswith",
"(",
"self",
".",
"PATH_SEPARATOR",
")",
":",
"path",
"=",
"path",
"[",
":",
"-",
"1",
"]",
"if",
"not",
"path",
":",
"return",
"None",
"dirname",
",",
"_",
... | Determines the directory name of the path.
The file system root is represented by an empty string.
Args:
path (str): path.
Returns:
str: directory name of the path or None. | [
"Determines",
"the",
"directory",
"name",
"of",
"the",
"path",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/file_system.py#L105-L122 | train | 208,406 |
log2timeline/dfvfs | dfvfs/vfs/file_system.py | FileSystem.GetDataStreamByPathSpec | def GetDataStreamByPathSpec(self, path_spec):
"""Retrieves a data stream for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
DataStream: a data stream or None if not available.
"""
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
data_stream_name = getattr(path_spec, 'data_stream', None)
return file_entry.GetDataStream(data_stream_name) | python | def GetDataStreamByPathSpec(self, path_spec):
"""Retrieves a data stream for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
DataStream: a data stream or None if not available.
"""
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
data_stream_name = getattr(path_spec, 'data_stream', None)
return file_entry.GetDataStream(data_stream_name) | [
"def",
"GetDataStreamByPathSpec",
"(",
"self",
",",
"path_spec",
")",
":",
"file_entry",
"=",
"self",
".",
"GetFileEntryByPathSpec",
"(",
"path_spec",
")",
"if",
"not",
"file_entry",
":",
"return",
"None",
"data_stream_name",
"=",
"getattr",
"(",
"path_spec",
",... | Retrieves a data stream for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
DataStream: a data stream or None if not available. | [
"Retrieves",
"a",
"data",
"stream",
"for",
"a",
"path",
"specification",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/file_system.py#L135-L149 | train | 208,407 |
log2timeline/dfvfs | dfvfs/vfs/file_system.py | FileSystem.GetFileObjectByPathSpec | def GetFileObjectByPathSpec(self, path_spec):
"""Retrieves a file-like object for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
FileIO: a file-like object or None if not available.
"""
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
return file_entry.GetFileObject() | python | def GetFileObjectByPathSpec(self, path_spec):
"""Retrieves a file-like object for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
FileIO: a file-like object or None if not available.
"""
file_entry = self.GetFileEntryByPathSpec(path_spec)
if not file_entry:
return None
return file_entry.GetFileObject() | [
"def",
"GetFileObjectByPathSpec",
"(",
"self",
",",
"path_spec",
")",
":",
"file_entry",
"=",
"self",
".",
"GetFileEntryByPathSpec",
"(",
"path_spec",
")",
"if",
"not",
"file_entry",
":",
"return",
"None",
"return",
"file_entry",
".",
"GetFileObject",
"(",
")"
] | Retrieves a file-like object for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
FileIO: a file-like object or None if not available. | [
"Retrieves",
"a",
"file",
"-",
"like",
"object",
"for",
"a",
"path",
"specification",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/file_system.py#L162-L175 | train | 208,408 |
log2timeline/dfvfs | dfvfs/vfs/file_system.py | FileSystem.GetPathSegmentAndSuffix | def GetPathSegmentAndSuffix(self, base_path, path):
"""Determines the path segment and suffix of the path.
None is returned if the path does not start with the base path and
an empty string if the path exactly matches the base path.
Args:
base_path (str): base path.
path (str): path.
Returns:
tuple[str, str]: path segment and suffix string.
"""
if path is None or base_path is None or not path.startswith(base_path):
return None, None
path_index = len(base_path)
if base_path and not base_path.endswith(self.PATH_SEPARATOR):
path_index += 1
if path_index == len(path):
return '', ''
path_segment, _, suffix = path[path_index:].partition(self.PATH_SEPARATOR)
return path_segment, suffix | python | def GetPathSegmentAndSuffix(self, base_path, path):
"""Determines the path segment and suffix of the path.
None is returned if the path does not start with the base path and
an empty string if the path exactly matches the base path.
Args:
base_path (str): base path.
path (str): path.
Returns:
tuple[str, str]: path segment and suffix string.
"""
if path is None or base_path is None or not path.startswith(base_path):
return None, None
path_index = len(base_path)
if base_path and not base_path.endswith(self.PATH_SEPARATOR):
path_index += 1
if path_index == len(path):
return '', ''
path_segment, _, suffix = path[path_index:].partition(self.PATH_SEPARATOR)
return path_segment, suffix | [
"def",
"GetPathSegmentAndSuffix",
"(",
"self",
",",
"base_path",
",",
"path",
")",
":",
"if",
"path",
"is",
"None",
"or",
"base_path",
"is",
"None",
"or",
"not",
"path",
".",
"startswith",
"(",
"base_path",
")",
":",
"return",
"None",
",",
"None",
"path_... | Determines the path segment and suffix of the path.
None is returned if the path does not start with the base path and
an empty string if the path exactly matches the base path.
Args:
base_path (str): base path.
path (str): path.
Returns:
tuple[str, str]: path segment and suffix string. | [
"Determines",
"the",
"path",
"segment",
"and",
"suffix",
"of",
"the",
"path",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/file_system.py#L177-L201 | train | 208,409 |
log2timeline/dfvfs | dfvfs/resolver/resolver.py | Resolver.OpenFileEntry | def OpenFileEntry(cls, path_spec_object, resolver_context=None):
"""Opens a file entry object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileEntry: file entry or None if the path specification could not be
resolved.
"""
file_system = cls.OpenFileSystem(
path_spec_object, resolver_context=resolver_context)
if resolver_context is None:
resolver_context = cls._resolver_context
file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)
# Release the file system so it will be removed from the cache
# when the file entry is destroyed.
resolver_context.ReleaseFileSystem(file_system)
return file_entry | python | def OpenFileEntry(cls, path_spec_object, resolver_context=None):
"""Opens a file entry object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileEntry: file entry or None if the path specification could not be
resolved.
"""
file_system = cls.OpenFileSystem(
path_spec_object, resolver_context=resolver_context)
if resolver_context is None:
resolver_context = cls._resolver_context
file_entry = file_system.GetFileEntryByPathSpec(path_spec_object)
# Release the file system so it will be removed from the cache
# when the file entry is destroyed.
resolver_context.ReleaseFileSystem(file_system)
return file_entry | [
"def",
"OpenFileEntry",
"(",
"cls",
",",
"path_spec_object",
",",
"resolver_context",
"=",
"None",
")",
":",
"file_system",
"=",
"cls",
".",
"OpenFileSystem",
"(",
"path_spec_object",
",",
"resolver_context",
"=",
"resolver_context",
")",
"if",
"resolver_context",
... | Opens a file entry object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileEntry: file entry or None if the path specification could not be
resolved. | [
"Opens",
"a",
"file",
"entry",
"object",
"defined",
"by",
"path",
"specification",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/resolver/resolver.py#L42-L66 | train | 208,410 |
log2timeline/dfvfs | dfvfs/resolver/resolver.py | Resolver.OpenFileObject | def OpenFileObject(cls, path_spec_object, resolver_context=None):
"""Opens a file-like object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileIO: file-like object or None if the path specification could not
be resolved.
Raises:
PathSpecError: if the path specification is incorrect.
TypeError: if the path specification type is unsupported.
"""
if not isinstance(path_spec_object, path_spec.PathSpec):
raise TypeError('Unsupported path specification type.')
if resolver_context is None:
resolver_context = cls._resolver_context
if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
if path_spec_object.HasParent():
raise errors.PathSpecError(
'Unsupported mount path specification with parent.')
mount_point = getattr(path_spec_object, 'identifier', None)
if not mount_point:
raise errors.PathSpecError(
'Unsupported path specification without mount point identifier.')
path_spec_object = mount_manager.MountPointManager.GetMountPoint(
mount_point)
if not path_spec_object:
raise errors.MountPointError(
'No such mount point: {0:s}'.format(mount_point))
file_object = resolver_context.GetFileObject(path_spec_object)
if not file_object:
resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)
file_object = resolver_helper.NewFileObject(resolver_context)
file_object.open(path_spec=path_spec_object)
return file_object | python | def OpenFileObject(cls, path_spec_object, resolver_context=None):
"""Opens a file-like object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileIO: file-like object or None if the path specification could not
be resolved.
Raises:
PathSpecError: if the path specification is incorrect.
TypeError: if the path specification type is unsupported.
"""
if not isinstance(path_spec_object, path_spec.PathSpec):
raise TypeError('Unsupported path specification type.')
if resolver_context is None:
resolver_context = cls._resolver_context
if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
if path_spec_object.HasParent():
raise errors.PathSpecError(
'Unsupported mount path specification with parent.')
mount_point = getattr(path_spec_object, 'identifier', None)
if not mount_point:
raise errors.PathSpecError(
'Unsupported path specification without mount point identifier.')
path_spec_object = mount_manager.MountPointManager.GetMountPoint(
mount_point)
if not path_spec_object:
raise errors.MountPointError(
'No such mount point: {0:s}'.format(mount_point))
file_object = resolver_context.GetFileObject(path_spec_object)
if not file_object:
resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)
file_object = resolver_helper.NewFileObject(resolver_context)
file_object.open(path_spec=path_spec_object)
return file_object | [
"def",
"OpenFileObject",
"(",
"cls",
",",
"path_spec_object",
",",
"resolver_context",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"path_spec_object",
",",
"path_spec",
".",
"PathSpec",
")",
":",
"raise",
"TypeError",
"(",
"'Unsupported path specificati... | Opens a file-like object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileIO: file-like object or None if the path specification could not
be resolved.
Raises:
PathSpecError: if the path specification is incorrect.
TypeError: if the path specification type is unsupported. | [
"Opens",
"a",
"file",
"-",
"like",
"object",
"defined",
"by",
"path",
"specification",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/resolver/resolver.py#L69-L113 | train | 208,411 |
log2timeline/dfvfs | dfvfs/resolver/resolver.py | Resolver.OpenFileSystem | def OpenFileSystem(cls, path_spec_object, resolver_context=None):
"""Opens a file system object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileSystem: file system or None if the path specification could not
be resolved or has no file system object.
Raises:
AccessError: if the access to open the file system was denied.
BackEndError: if the file system cannot be opened.
MountPointError: if the mount point specified in the path specification
does not exist.
PathSpecError: if the path specification is incorrect.
TypeError: if the path specification type is unsupported.
"""
if not isinstance(path_spec_object, path_spec.PathSpec):
raise TypeError('Unsupported path specification type.')
if resolver_context is None:
resolver_context = cls._resolver_context
if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
if path_spec_object.HasParent():
raise errors.PathSpecError(
'Unsupported mount path specification with parent.')
mount_point = getattr(path_spec_object, 'identifier', None)
if not mount_point:
raise errors.PathSpecError(
'Unsupported path specification without mount point identifier.')
path_spec_object = mount_manager.MountPointManager.GetMountPoint(
mount_point)
if not path_spec_object:
raise errors.MountPointError(
'No such mount point: {0:s}'.format(mount_point))
file_system = resolver_context.GetFileSystem(path_spec_object)
if not file_system:
resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)
file_system = resolver_helper.NewFileSystem(resolver_context)
try:
file_system.Open(path_spec_object)
except (IOError, ValueError) as exception:
raise errors.BackEndError(
'Unable to open file system with error: {0!s}'.format(exception))
return file_system | python | def OpenFileSystem(cls, path_spec_object, resolver_context=None):
"""Opens a file system object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileSystem: file system or None if the path specification could not
be resolved or has no file system object.
Raises:
AccessError: if the access to open the file system was denied.
BackEndError: if the file system cannot be opened.
MountPointError: if the mount point specified in the path specification
does not exist.
PathSpecError: if the path specification is incorrect.
TypeError: if the path specification type is unsupported.
"""
if not isinstance(path_spec_object, path_spec.PathSpec):
raise TypeError('Unsupported path specification type.')
if resolver_context is None:
resolver_context = cls._resolver_context
if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
if path_spec_object.HasParent():
raise errors.PathSpecError(
'Unsupported mount path specification with parent.')
mount_point = getattr(path_spec_object, 'identifier', None)
if not mount_point:
raise errors.PathSpecError(
'Unsupported path specification without mount point identifier.')
path_spec_object = mount_manager.MountPointManager.GetMountPoint(
mount_point)
if not path_spec_object:
raise errors.MountPointError(
'No such mount point: {0:s}'.format(mount_point))
file_system = resolver_context.GetFileSystem(path_spec_object)
if not file_system:
resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)
file_system = resolver_helper.NewFileSystem(resolver_context)
try:
file_system.Open(path_spec_object)
except (IOError, ValueError) as exception:
raise errors.BackEndError(
'Unable to open file system with error: {0!s}'.format(exception))
return file_system | [
"def",
"OpenFileSystem",
"(",
"cls",
",",
"path_spec_object",
",",
"resolver_context",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"path_spec_object",
",",
"path_spec",
".",
"PathSpec",
")",
":",
"raise",
"TypeError",
"(",
"'Unsupported path specificati... | Opens a file system object defined by path specification.
Args:
path_spec_object (PathSpec): path specification.
resolver_context (Optional[Context]): resolver context, where None
represents the built in context which is not multi process safe.
Returns:
FileSystem: file system or None if the path specification could not
be resolved or has no file system object.
Raises:
AccessError: if the access to open the file system was denied.
BackEndError: if the file system cannot be opened.
MountPointError: if the mount point specified in the path specification
does not exist.
PathSpecError: if the path specification is incorrect.
TypeError: if the path specification type is unsupported. | [
"Opens",
"a",
"file",
"system",
"object",
"defined",
"by",
"path",
"specification",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/resolver/resolver.py#L116-L169 | train | 208,412 |
log2timeline/dfvfs | dfvfs/lib/bde.py | BDEVolumeOpen | def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain):
"""Opens the BDE volume using the path specification.
Args:
bde_volume (pybde.volume): BDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain.
"""
password = key_chain.GetCredential(path_spec, 'password')
if password:
bde_volume.set_password(password)
recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
if recovery_password:
bde_volume.set_recovery_password(recovery_password)
startup_key = key_chain.GetCredential(path_spec, 'startup_key')
if startup_key:
bde_volume.read_startup_key(startup_key)
bde_volume.open_file_object(file_object) | python | def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain):
"""Opens the BDE volume using the path specification.
Args:
bde_volume (pybde.volume): BDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain.
"""
password = key_chain.GetCredential(path_spec, 'password')
if password:
bde_volume.set_password(password)
recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
if recovery_password:
bde_volume.set_recovery_password(recovery_password)
startup_key = key_chain.GetCredential(path_spec, 'startup_key')
if startup_key:
bde_volume.read_startup_key(startup_key)
bde_volume.open_file_object(file_object) | [
"def",
"BDEVolumeOpen",
"(",
"bde_volume",
",",
"path_spec",
",",
"file_object",
",",
"key_chain",
")",
":",
"password",
"=",
"key_chain",
".",
"GetCredential",
"(",
"path_spec",
",",
"'password'",
")",
"if",
"password",
":",
"bde_volume",
".",
"set_password",
... | Opens the BDE volume using the path specification.
Args:
bde_volume (pybde.volume): BDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain. | [
"Opens",
"the",
"BDE",
"volume",
"using",
"the",
"path",
"specification",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/bde.py#L7-L28 | train | 208,413 |
log2timeline/dfvfs | dfvfs/compression/manager.py | CompressionManager.GetDecompressor | def GetDecompressor(cls, compression_method):
"""Retrieves the decompressor object for a specific compression method.
Args:
compression_method (str): compression method identifier.
Returns:
Decompressor: decompressor or None if the compression method does
not exists.
"""
compression_method = compression_method.lower()
decompressor = cls._decompressors.get(compression_method, None)
if not decompressor:
return None
return decompressor() | python | def GetDecompressor(cls, compression_method):
"""Retrieves the decompressor object for a specific compression method.
Args:
compression_method (str): compression method identifier.
Returns:
Decompressor: decompressor or None if the compression method does
not exists.
"""
compression_method = compression_method.lower()
decompressor = cls._decompressors.get(compression_method, None)
if not decompressor:
return None
return decompressor() | [
"def",
"GetDecompressor",
"(",
"cls",
",",
"compression_method",
")",
":",
"compression_method",
"=",
"compression_method",
".",
"lower",
"(",
")",
"decompressor",
"=",
"cls",
".",
"_decompressors",
".",
"get",
"(",
"compression_method",
",",
"None",
")",
"if",
... | Retrieves the decompressor object for a specific compression method.
Args:
compression_method (str): compression method identifier.
Returns:
Decompressor: decompressor or None if the compression method does
not exists. | [
"Retrieves",
"the",
"decompressor",
"object",
"for",
"a",
"specific",
"compression",
"method",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/compression/manager.py#L31-L46 | train | 208,414 |
log2timeline/dfvfs | dfvfs/compression/manager.py | CompressionManager.RegisterDecompressor | def RegisterDecompressor(cls, decompressor):
"""Registers a decompressor for a specific compression method.
Args:
decompressor (type): decompressor class.
Raises:
KeyError: if the corresponding decompressor is already set.
"""
compression_method = decompressor.COMPRESSION_METHOD.lower()
if compression_method in cls._decompressors:
raise KeyError(
'Decompressor for compression method: {0:s} already set.'.format(
decompressor.COMPRESSION_METHOD))
cls._decompressors[compression_method] = decompressor | python | def RegisterDecompressor(cls, decompressor):
"""Registers a decompressor for a specific compression method.
Args:
decompressor (type): decompressor class.
Raises:
KeyError: if the corresponding decompressor is already set.
"""
compression_method = decompressor.COMPRESSION_METHOD.lower()
if compression_method in cls._decompressors:
raise KeyError(
'Decompressor for compression method: {0:s} already set.'.format(
decompressor.COMPRESSION_METHOD))
cls._decompressors[compression_method] = decompressor | [
"def",
"RegisterDecompressor",
"(",
"cls",
",",
"decompressor",
")",
":",
"compression_method",
"=",
"decompressor",
".",
"COMPRESSION_METHOD",
".",
"lower",
"(",
")",
"if",
"compression_method",
"in",
"cls",
".",
"_decompressors",
":",
"raise",
"KeyError",
"(",
... | Registers a decompressor for a specific compression method.
Args:
decompressor (type): decompressor class.
Raises:
KeyError: if the corresponding decompressor is already set. | [
"Registers",
"a",
"decompressor",
"for",
"a",
"specific",
"compression",
"method",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/compression/manager.py#L49-L64 | train | 208,415 |
log2timeline/dfvfs | dfvfs/file_io/encrypted_stream_io.py | EncryptedStream._GetDecrypter | def _GetDecrypter(self):
"""Retrieves a decrypter.
Returns:
Decrypter: decrypter.
Raises:
IOError: if the decrypter cannot be initialized.
OSError: if the decrypter cannot be initialized.
"""
resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(self._path_spec)
try:
credentials = resolver.Resolver.key_chain.GetCredentials(self._path_spec)
return encryption_manager.EncryptionManager.GetDecrypter(
self._encryption_method, **credentials)
except ValueError as exception:
raise IOError(exception) | python | def _GetDecrypter(self):
"""Retrieves a decrypter.
Returns:
Decrypter: decrypter.
Raises:
IOError: if the decrypter cannot be initialized.
OSError: if the decrypter cannot be initialized.
"""
resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(self._path_spec)
try:
credentials = resolver.Resolver.key_chain.GetCredentials(self._path_spec)
return encryption_manager.EncryptionManager.GetDecrypter(
self._encryption_method, **credentials)
except ValueError as exception:
raise IOError(exception) | [
"def",
"_GetDecrypter",
"(",
"self",
")",
":",
"resolver",
".",
"Resolver",
".",
"key_chain",
".",
"ExtractCredentialsFromPathSpec",
"(",
"self",
".",
"_path_spec",
")",
"try",
":",
"credentials",
"=",
"resolver",
".",
"Resolver",
".",
"key_chain",
".",
"GetCr... | Retrieves a decrypter.
Returns:
Decrypter: decrypter.
Raises:
IOError: if the decrypter cannot be initialized.
OSError: if the decrypter cannot be initialized. | [
"Retrieves",
"a",
"decrypter",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encrypted_stream_io.py#L68-L85 | train | 208,416 |
log2timeline/dfvfs | dfvfs/file_io/encrypted_stream_io.py | EncryptedStream._GetDecryptedStreamSize | def _GetDecryptedStreamSize(self):
"""Retrieves the decrypted stream size.
Returns:
int: decrypted stream size.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decrypter = self._GetDecrypter()
self._decrypted_data = b''
encrypted_data_offset = 0
encrypted_data_size = self._file_object.get_size()
decrypted_stream_size = 0
while encrypted_data_offset < encrypted_data_size:
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encrypted_data_offset += read_count
decrypted_stream_size += self._decrypted_data_size
return decrypted_stream_size | python | def _GetDecryptedStreamSize(self):
"""Retrieves the decrypted stream size.
Returns:
int: decrypted stream size.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decrypter = self._GetDecrypter()
self._decrypted_data = b''
encrypted_data_offset = 0
encrypted_data_size = self._file_object.get_size()
decrypted_stream_size = 0
while encrypted_data_offset < encrypted_data_size:
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encrypted_data_offset += read_count
decrypted_stream_size += self._decrypted_data_size
return decrypted_stream_size | [
"def",
"_GetDecryptedStreamSize",
"(",
"self",
")",
":",
"self",
".",
"_file_object",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_SET",
")",
"self",
".",
"_decrypter",
"=",
"self",
".",
"_GetDecrypter",
"(",
")",
"self",
".",
"_decrypted_data",
"=",
"b'... | Retrieves the decrypted stream size.
Returns:
int: decrypted stream size. | [
"Retrieves",
"the",
"decrypted",
"stream",
"size",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encrypted_stream_io.py#L87-L110 | train | 208,417 |
log2timeline/dfvfs | dfvfs/file_io/encrypted_stream_io.py | EncryptedStream._Open | def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not self._file_object_set_in_init and not path_spec:
raise ValueError('Missing path specification.')
if not self._file_object_set_in_init:
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
self._encryption_method = getattr(path_spec, 'encryption_method', None)
if self._encryption_method is None:
raise errors.PathSpecError(
'Path specification missing encryption method.')
self._file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
self._path_spec = path_spec | python | def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
if not self._file_object_set_in_init and not path_spec:
raise ValueError('Missing path specification.')
if not self._file_object_set_in_init:
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
self._encryption_method = getattr(path_spec, 'encryption_method', None)
if self._encryption_method is None:
raise errors.PathSpecError(
'Path specification missing encryption method.')
self._file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
self._path_spec = path_spec | [
"def",
"_Open",
"(",
"self",
",",
"path_spec",
"=",
"None",
",",
"mode",
"=",
"'rb'",
")",
":",
"if",
"not",
"self",
".",
"_file_object_set_in_init",
"and",
"not",
"path_spec",
":",
"raise",
"ValueError",
"(",
"'Missing path specification.'",
")",
"if",
"not... | Opens the file-like object.
Args:
path_spec (Optional[PathSpec]): path specification.
mode (Optional[str]): file access mode.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
OSError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid. | [
"Opens",
"the",
"file",
"-",
"like",
"object",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encrypted_stream_io.py#L112-L143 | train | 208,418 |
log2timeline/dfvfs | dfvfs/file_io/encrypted_stream_io.py | EncryptedStream._AlignDecryptedDataOffset | def _AlignDecryptedDataOffset(self, decrypted_data_offset):
"""Aligns the encrypted file with the decrypted data offset.
Args:
decrypted_data_offset (int): decrypted data offset.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decrypter = self._GetDecrypter()
self._decrypted_data = b''
encrypted_data_offset = 0
encrypted_data_size = self._file_object.get_size()
while encrypted_data_offset < encrypted_data_size:
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encrypted_data_offset += read_count
if decrypted_data_offset < self._decrypted_data_size:
self._decrypted_data_offset = decrypted_data_offset
break
decrypted_data_offset -= self._decrypted_data_size | python | def _AlignDecryptedDataOffset(self, decrypted_data_offset):
"""Aligns the encrypted file with the decrypted data offset.
Args:
decrypted_data_offset (int): decrypted data offset.
"""
self._file_object.seek(0, os.SEEK_SET)
self._decrypter = self._GetDecrypter()
self._decrypted_data = b''
encrypted_data_offset = 0
encrypted_data_size = self._file_object.get_size()
while encrypted_data_offset < encrypted_data_size:
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
if read_count == 0:
break
encrypted_data_offset += read_count
if decrypted_data_offset < self._decrypted_data_size:
self._decrypted_data_offset = decrypted_data_offset
break
decrypted_data_offset -= self._decrypted_data_size | [
"def",
"_AlignDecryptedDataOffset",
"(",
"self",
",",
"decrypted_data_offset",
")",
":",
"self",
".",
"_file_object",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_SET",
")",
"self",
".",
"_decrypter",
"=",
"self",
".",
"_GetDecrypter",
"(",
")",
"self",
".... | Aligns the encrypted file with the decrypted data offset.
Args:
decrypted_data_offset (int): decrypted data offset. | [
"Aligns",
"the",
"encrypted",
"file",
"with",
"the",
"decrypted",
"data",
"offset",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encrypted_stream_io.py#L145-L170 | train | 208,419 |
log2timeline/dfvfs | dfvfs/file_io/encrypted_stream_io.py | EncryptedStream._ReadEncryptedData | def _ReadEncryptedData(self, read_size):
"""Reads encrypted data from the file-like object.
Args:
read_size (int): number of bytes of encrypted data to read.
Returns:
int: number of bytes of encrypted data read.
"""
encrypted_data = self._file_object.read(read_size)
read_count = len(encrypted_data)
self._encrypted_data = b''.join([self._encrypted_data, encrypted_data])
self._decrypted_data, self._encrypted_data = (
self._decrypter.Decrypt(self._encrypted_data))
self._decrypted_data_size = len(self._decrypted_data)
return read_count | python | def _ReadEncryptedData(self, read_size):
"""Reads encrypted data from the file-like object.
Args:
read_size (int): number of bytes of encrypted data to read.
Returns:
int: number of bytes of encrypted data read.
"""
encrypted_data = self._file_object.read(read_size)
read_count = len(encrypted_data)
self._encrypted_data = b''.join([self._encrypted_data, encrypted_data])
self._decrypted_data, self._encrypted_data = (
self._decrypter.Decrypt(self._encrypted_data))
self._decrypted_data_size = len(self._decrypted_data)
return read_count | [
"def",
"_ReadEncryptedData",
"(",
"self",
",",
"read_size",
")",
":",
"encrypted_data",
"=",
"self",
".",
"_file_object",
".",
"read",
"(",
"read_size",
")",
"read_count",
"=",
"len",
"(",
"encrypted_data",
")",
"self",
".",
"_encrypted_data",
"=",
"b''",
".... | Reads encrypted data from the file-like object.
Args:
read_size (int): number of bytes of encrypted data to read.
Returns:
int: number of bytes of encrypted data read. | [
"Reads",
"encrypted",
"data",
"from",
"the",
"file",
"-",
"like",
"object",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encrypted_stream_io.py#L172-L192 | train | 208,420 |
log2timeline/dfvfs | dfvfs/file_io/encrypted_stream_io.py | EncryptedStream.SetDecryptedStreamSize | def SetDecryptedStreamSize(self, decrypted_stream_size):
"""Sets the decrypted stream size.
This function is used to set the decrypted stream size if it can be
determined separately.
Args:
decrypted_stream_size (int): size of the decrypted stream in bytes.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the decrypted stream size is invalid.
"""
if self._is_open:
raise IOError('Already open.')
if decrypted_stream_size < 0:
raise ValueError((
'Invalid decrypted stream size: {0:d} value out of '
'bounds.').format(decrypted_stream_size))
self._decrypted_stream_size = decrypted_stream_size | python | def SetDecryptedStreamSize(self, decrypted_stream_size):
"""Sets the decrypted stream size.
This function is used to set the decrypted stream size if it can be
determined separately.
Args:
decrypted_stream_size (int): size of the decrypted stream in bytes.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the decrypted stream size is invalid.
"""
if self._is_open:
raise IOError('Already open.')
if decrypted_stream_size < 0:
raise ValueError((
'Invalid decrypted stream size: {0:d} value out of '
'bounds.').format(decrypted_stream_size))
self._decrypted_stream_size = decrypted_stream_size | [
"def",
"SetDecryptedStreamSize",
"(",
"self",
",",
"decrypted_stream_size",
")",
":",
"if",
"self",
".",
"_is_open",
":",
"raise",
"IOError",
"(",
"'Already open.'",
")",
"if",
"decrypted_stream_size",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"(",
"'Invalid de... | Sets the decrypted stream size.
This function is used to set the decrypted stream size if it can be
determined separately.
Args:
decrypted_stream_size (int): size of the decrypted stream in bytes.
Raises:
IOError: if the file-like object is already open.
OSError: if the file-like object is already open.
ValueError: if the decrypted stream size is invalid. | [
"Sets",
"the",
"decrypted",
"stream",
"size",
"."
] | 2b3ccd115f9901d89f383397d4a1376a873c83c4 | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/encrypted_stream_io.py#L194-L216 | train | 208,421 |
senaite/senaite.lims | src/senaite/lims/browser/bootstrap/viewlets.py | ViewletView.getViewletByName | def getViewletByName(self, name):
""" Viewlets allow through-the-web customizations.
Through-the-web customization magic is managed by five.customerize.
We need to think of this when looking up viewlets.
@return: Viewlet registration object
"""
views = registration.getViews(IBrowserRequest)
for v in views:
if v.provided == IViewlet:
# Note that we might have conflicting BrowserView with the same
# name, thus we need to check for provided
if v.name == name:
return v
return None | python | def getViewletByName(self, name):
""" Viewlets allow through-the-web customizations.
Through-the-web customization magic is managed by five.customerize.
We need to think of this when looking up viewlets.
@return: Viewlet registration object
"""
views = registration.getViews(IBrowserRequest)
for v in views:
if v.provided == IViewlet:
# Note that we might have conflicting BrowserView with the same
# name, thus we need to check for provided
if v.name == name:
return v
return None | [
"def",
"getViewletByName",
"(",
"self",
",",
"name",
")",
":",
"views",
"=",
"registration",
".",
"getViews",
"(",
"IBrowserRequest",
")",
"for",
"v",
"in",
"views",
":",
"if",
"v",
".",
"provided",
"==",
"IViewlet",
":",
"# Note that we might have conflicting... | Viewlets allow through-the-web customizations.
Through-the-web customization magic is managed by five.customerize.
We need to think of this when looking up viewlets.
@return: Viewlet registration object | [
"Viewlets",
"allow",
"through",
"-",
"the",
"-",
"web",
"customizations",
"."
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/bootstrap/viewlets.py#L162-L181 | train | 208,422 |
senaite/senaite.lims | src/senaite/lims/browser/bootstrap/viewlets.py | ViewletView.setupViewletByName | def setupViewletByName(self, name):
""" Constructs a viewlet instance by its name.
Viewlet update() and render() method are not called.
@return: Viewlet instance of None if viewlet with name does not exist
"""
context = aq_inner(self.context)
request = self.request
# Perform viewlet regisration look-up
# from adapters registry
reg = self.getViewletByName(name)
if reg is None:
return None
# factory method is responsible for creating the viewlet instance
factory = reg.factory
# Create viewlet and put it to the acquisition chain
# Viewlet need initialization parameters: context, request, view
try:
viewlet = factory(context, request, self, None).__of__(context)
except TypeError:
# Bad constructor call parameters
raise RuntimeError(
"Unable to initialize viewlet {}. "
"Factory method {} call failed."
.format(name, str(factory)))
return viewlet | python | def setupViewletByName(self, name):
""" Constructs a viewlet instance by its name.
Viewlet update() and render() method are not called.
@return: Viewlet instance of None if viewlet with name does not exist
"""
context = aq_inner(self.context)
request = self.request
# Perform viewlet regisration look-up
# from adapters registry
reg = self.getViewletByName(name)
if reg is None:
return None
# factory method is responsible for creating the viewlet instance
factory = reg.factory
# Create viewlet and put it to the acquisition chain
# Viewlet need initialization parameters: context, request, view
try:
viewlet = factory(context, request, self, None).__of__(context)
except TypeError:
# Bad constructor call parameters
raise RuntimeError(
"Unable to initialize viewlet {}. "
"Factory method {} call failed."
.format(name, str(factory)))
return viewlet | [
"def",
"setupViewletByName",
"(",
"self",
",",
"name",
")",
":",
"context",
"=",
"aq_inner",
"(",
"self",
".",
"context",
")",
"request",
"=",
"self",
".",
"request",
"# Perform viewlet regisration look-up",
"# from adapters registry",
"reg",
"=",
"self",
".",
"... | Constructs a viewlet instance by its name.
Viewlet update() and render() method are not called.
@return: Viewlet instance of None if viewlet with name does not exist | [
"Constructs",
"a",
"viewlet",
"instance",
"by",
"its",
"name",
"."
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/bootstrap/viewlets.py#L183-L213 | train | 208,423 |
senaite/senaite.lims | src/senaite/lims/setuphandlers.py | setup_handler | def setup_handler(context):
"""Generic setup handler
"""
if context.readDataFile('senaite.lims.txt') is None:
return
logger.info("SENAITE setup handler [BEGIN]")
portal = context.getSite() # noqa
# Custom setup handlers
setup_html_filter(portal)
logger.info("SENAITE setup handler [DONE]") | python | def setup_handler(context):
"""Generic setup handler
"""
if context.readDataFile('senaite.lims.txt') is None:
return
logger.info("SENAITE setup handler [BEGIN]")
portal = context.getSite() # noqa
# Custom setup handlers
setup_html_filter(portal)
logger.info("SENAITE setup handler [DONE]") | [
"def",
"setup_handler",
"(",
"context",
")",
":",
"if",
"context",
".",
"readDataFile",
"(",
"'senaite.lims.txt'",
")",
"is",
"None",
":",
"return",
"logger",
".",
"info",
"(",
"\"SENAITE setup handler [BEGIN]\"",
")",
"portal",
"=",
"context",
".",
"getSite",
... | Generic setup handler | [
"Generic",
"setup",
"handler"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/setuphandlers.py#L30-L43 | train | 208,424 |
senaite/senaite.lims | src/senaite/lims/setuphandlers.py | setup_html_filter | def setup_html_filter(portal):
"""Setup HTML filtering for resultsinterpretations
"""
logger.info("*** Setup HTML Filter ***")
# bypass the broken API from portal_transforms
adapter = IFilterSchema(portal)
style_whitelist = adapter.style_whitelist
for style in ALLOWED_STYLES:
logger.info("Allow style '{}'".format(style))
if style not in style_whitelist:
style_whitelist.append(style)
adapter.style_whitelist = style_whitelist | python | def setup_html_filter(portal):
"""Setup HTML filtering for resultsinterpretations
"""
logger.info("*** Setup HTML Filter ***")
# bypass the broken API from portal_transforms
adapter = IFilterSchema(portal)
style_whitelist = adapter.style_whitelist
for style in ALLOWED_STYLES:
logger.info("Allow style '{}'".format(style))
if style not in style_whitelist:
style_whitelist.append(style)
adapter.style_whitelist = style_whitelist | [
"def",
"setup_html_filter",
"(",
"portal",
")",
":",
"logger",
".",
"info",
"(",
"\"*** Setup HTML Filter ***\"",
")",
"# bypass the broken API from portal_transforms",
"adapter",
"=",
"IFilterSchema",
"(",
"portal",
")",
"style_whitelist",
"=",
"adapter",
".",
"style_w... | Setup HTML filtering for resultsinterpretations | [
"Setup",
"HTML",
"filtering",
"for",
"resultsinterpretations"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/setuphandlers.py#L46-L57 | train | 208,425 |
senaite/senaite.lims | src/senaite/lims/upgrades/handlers.py | to_1000 | def to_1000(portal_setup):
"""Initial version to 1000
:param portal_setup: The portal_setup tool
"""
logger.info("Run all import steps from SENAITE LIMS ...")
context = portal_setup._getImportContext(PROFILE_ID)
portal = context.getSite()
setup_html_filter(portal)
portal_setup.runAllImportStepsFromProfile(PROFILE_ID)
logger.info("Run all import steps from SENAITE LIMS [DONE]") | python | def to_1000(portal_setup):
"""Initial version to 1000
:param portal_setup: The portal_setup tool
"""
logger.info("Run all import steps from SENAITE LIMS ...")
context = portal_setup._getImportContext(PROFILE_ID)
portal = context.getSite()
setup_html_filter(portal)
portal_setup.runAllImportStepsFromProfile(PROFILE_ID)
logger.info("Run all import steps from SENAITE LIMS [DONE]") | [
"def",
"to_1000",
"(",
"portal_setup",
")",
":",
"logger",
".",
"info",
"(",
"\"Run all import steps from SENAITE LIMS ...\"",
")",
"context",
"=",
"portal_setup",
".",
"_getImportContext",
"(",
"PROFILE_ID",
")",
"portal",
"=",
"context",
".",
"getSite",
"(",
")"... | Initial version to 1000
:param portal_setup: The portal_setup tool | [
"Initial",
"version",
"to",
"1000"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/upgrades/handlers.py#L27-L38 | train | 208,426 |
senaite/senaite.lims | src/senaite/lims/browser/spotlight/jsonapi.py | spotlight_search_route | def spotlight_search_route(context, request):
"""The spotlight search route
"""
catalogs = [
CATALOG_ANALYSIS_REQUEST_LISTING,
"portal_catalog",
"bika_setup_catalog",
"bika_catalog",
"bika_catalog_worksheet_listing"
]
search_results = []
for catalog in catalogs:
search_results.extend(search(catalog=catalog))
# extract the data from all the brains
items = map(get_brain_info, search_results)
return {
"count": len(items),
"items": sorted(items, key=itemgetter("title")),
} | python | def spotlight_search_route(context, request):
"""The spotlight search route
"""
catalogs = [
CATALOG_ANALYSIS_REQUEST_LISTING,
"portal_catalog",
"bika_setup_catalog",
"bika_catalog",
"bika_catalog_worksheet_listing"
]
search_results = []
for catalog in catalogs:
search_results.extend(search(catalog=catalog))
# extract the data from all the brains
items = map(get_brain_info, search_results)
return {
"count": len(items),
"items": sorted(items, key=itemgetter("title")),
} | [
"def",
"spotlight_search_route",
"(",
"context",
",",
"request",
")",
":",
"catalogs",
"=",
"[",
"CATALOG_ANALYSIS_REQUEST_LISTING",
",",
"\"portal_catalog\"",
",",
"\"bika_setup_catalog\"",
",",
"\"bika_catalog\"",
",",
"\"bika_catalog_worksheet_listing\"",
"]",
"search_re... | The spotlight search route | [
"The",
"spotlight",
"search",
"route"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/spotlight/jsonapi.py#L31-L52 | train | 208,427 |
senaite/senaite.lims | src/senaite/lims/browser/spotlight/jsonapi.py | get_brain_info | def get_brain_info(brain):
"""Extract the brain info
"""
icon = api.get_icon(brain)
# avoid 404 errors with these guys
if "document_icon.gif" in icon:
icon = ""
id = api.get_id(brain)
url = api.get_url(brain)
title = api.get_title(brain)
description = api.get_description(brain)
parent = api.get_parent(brain)
parent_title = api.get_title(parent)
parent_url = api.get_url(parent)
return {
"id": id,
"title": title,
"title_or_id": title or id,
"description": description,
"url": url,
"parent_title": parent_title,
"parent_url": parent_url,
"icon": icon,
} | python | def get_brain_info(brain):
"""Extract the brain info
"""
icon = api.get_icon(brain)
# avoid 404 errors with these guys
if "document_icon.gif" in icon:
icon = ""
id = api.get_id(brain)
url = api.get_url(brain)
title = api.get_title(brain)
description = api.get_description(brain)
parent = api.get_parent(brain)
parent_title = api.get_title(parent)
parent_url = api.get_url(parent)
return {
"id": id,
"title": title,
"title_or_id": title or id,
"description": description,
"url": url,
"parent_title": parent_title,
"parent_url": parent_url,
"icon": icon,
} | [
"def",
"get_brain_info",
"(",
"brain",
")",
":",
"icon",
"=",
"api",
".",
"get_icon",
"(",
"brain",
")",
"# avoid 404 errors with these guys",
"if",
"\"document_icon.gif\"",
"in",
"icon",
":",
"icon",
"=",
"\"\"",
"id",
"=",
"api",
".",
"get_id",
"(",
"brain... | Extract the brain info | [
"Extract",
"the",
"brain",
"info"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/spotlight/jsonapi.py#L55-L80 | train | 208,428 |
senaite/senaite.lims | src/senaite/lims/browser/spotlight/jsonapi.py | get_search_index_for | def get_search_index_for(catalog):
"""Returns the search index to query
"""
searchable_text_index = "SearchableText"
listing_searchable_text_index = "listing_searchable_text"
if catalog == CATALOG_ANALYSIS_REQUEST_LISTING:
tool = api.get_tool(catalog)
indexes = tool.indexes()
if listing_searchable_text_index in indexes:
return listing_searchable_text_index
return searchable_text_index | python | def get_search_index_for(catalog):
"""Returns the search index to query
"""
searchable_text_index = "SearchableText"
listing_searchable_text_index = "listing_searchable_text"
if catalog == CATALOG_ANALYSIS_REQUEST_LISTING:
tool = api.get_tool(catalog)
indexes = tool.indexes()
if listing_searchable_text_index in indexes:
return listing_searchable_text_index
return searchable_text_index | [
"def",
"get_search_index_for",
"(",
"catalog",
")",
":",
"searchable_text_index",
"=",
"\"SearchableText\"",
"listing_searchable_text_index",
"=",
"\"listing_searchable_text\"",
"if",
"catalog",
"==",
"CATALOG_ANALYSIS_REQUEST_LISTING",
":",
"tool",
"=",
"api",
".",
"get_to... | Returns the search index to query | [
"Returns",
"the",
"search",
"index",
"to",
"query"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/spotlight/jsonapi.py#L94-L106 | train | 208,429 |
senaite/senaite.lims | src/senaite/lims/browser/spotlight/jsonapi.py | make_query | def make_query(catalog):
"""A function to prepare a query
"""
query = {}
request = api.get_request()
index = get_search_index_for(catalog)
limit = request.form.get("limit")
q = request.form.get("q")
if len(q) > 0:
query[index] = q + "*"
else:
return None
portal_type = request.form.get("portal_type")
if portal_type:
if not isinstance(portal_type, list):
portal_type = [portal_type]
query["portal_type"] = portal_type
if limit and limit.isdigit():
query["sort_limit"] = int(limit)
return query | python | def make_query(catalog):
"""A function to prepare a query
"""
query = {}
request = api.get_request()
index = get_search_index_for(catalog)
limit = request.form.get("limit")
q = request.form.get("q")
if len(q) > 0:
query[index] = q + "*"
else:
return None
portal_type = request.form.get("portal_type")
if portal_type:
if not isinstance(portal_type, list):
portal_type = [portal_type]
query["portal_type"] = portal_type
if limit and limit.isdigit():
query["sort_limit"] = int(limit)
return query | [
"def",
"make_query",
"(",
"catalog",
")",
":",
"query",
"=",
"{",
"}",
"request",
"=",
"api",
".",
"get_request",
"(",
")",
"index",
"=",
"get_search_index_for",
"(",
"catalog",
")",
"limit",
"=",
"request",
".",
"form",
".",
"get",
"(",
"\"limit\"",
"... | A function to prepare a query | [
"A",
"function",
"to",
"prepare",
"a",
"query"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/spotlight/jsonapi.py#L109-L132 | train | 208,430 |
def icon_cache_key(method, self, brain_or_object):
    """Generates a cache key for the icon lookup
    Includes the virtual URL to handle multiple HTTP/HTTPS domains
    Example: http://senaite.local/clients?modified=1512033263370
    """
    millis = api.get_modification_date(brain_or_object).millis()
    cache_key = "{}?modified={}".format(api.get_url(brain_or_object), millis)
    logger.debug("Generated Cache Key: {}".format(cache_key))
    return cache_key
"def",
"icon_cache_key",
"(",
"method",
",",
"self",
",",
"brain_or_object",
")",
":",
"url",
"=",
"api",
".",
"get_url",
"(",
"brain_or_object",
")",
"modified",
"=",
"api",
".",
"get_modification_date",
"(",
"brain_or_object",
")",
".",
"millis",
"(",
")",... | Generates a cache key for the icon lookup
Includes the virtual URL to handle multiple HTTP/HTTPS domains
Example: http://senaite.local/clients?modified=1512033263370 | [
"Generates",
"a",
"cache",
"key",
"for",
"the",
"icon",
"lookup"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/bootstrap/views.py#L33-L43 | train | 208,431 |
def get_icon_for(self, brain_or_object):
    """Get the navigation portlet icon for the brain or object
    The cache key ensures that the lookup is done only once per domain name
    """
    type_info = api.get_tool("portal_types").getTypeInfo(
        api.get_portal_type(brain_or_object))
    icon = type_info.getIcon()
    if not icon:
        return ""
    # Always try to get the big icon for high-res displays
    icon_big = icon.replace(".png", "_big.png")
    # fall back to a default icon if the looked up icon does not exist
    if self.context.restrictedTraverse(icon_big, None) is None:
        icon_big = None
    html_tag = "<img title='{}' src='{}/{}' width='16' />".format(
        api.get_title(brain_or_object),
        api.get_url(api.get_portal()),
        icon_big or icon)
    logger.info("Generated Icon Tag for {}: {}".format(
        api.get_path(brain_or_object), html_tag))
    return html_tag
"def",
"get_icon_for",
"(",
"self",
",",
"brain_or_object",
")",
":",
"portal_types",
"=",
"api",
".",
"get_tool",
"(",
"\"portal_types\"",
")",
"fti",
"=",
"portal_types",
".",
"getTypeInfo",
"(",
"api",
".",
"get_portal_type",
"(",
"brain_or_object",
")",
")... | Get the navigation portlet icon for the brain or object
The cache key ensures that the lookup is done only once per domain name | [
"Get",
"the",
"navigation",
"portlet",
"icon",
"for",
"the",
"brain",
"or",
"object"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/bootstrap/views.py#L60-L81 | train | 208,432 |
def getViewportValues(self, view=None):
    """Determine the value of the viewport meta-tag
    """
    # fixed, ordered key/value pairs joined into "k=v,k=v" form
    pairs = (("width", "device-width"), ("initial-scale", "1.0"))
    return ",".join("%s=%s" % pair for pair in pairs)
"def",
"getViewportValues",
"(",
"self",
",",
"view",
"=",
"None",
")",
":",
"values",
"=",
"{",
"'width'",
":",
"'device-width'",
",",
"'initial-scale'",
":",
"'1.0'",
",",
"}",
"return",
"','",
".",
"join",
"(",
"'%s=%s'",
"%",
"(",
"k",
",",
"v",
... | Determine the value of the viewport meta-tag | [
"Determine",
"the",
"value",
"of",
"the",
"viewport",
"meta",
"-",
"tag"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/bootstrap/views.py#L83-L91 | train | 208,433 |
def getColumnsClasses(self, view=None):
    """Determine whether a column should be shown. The left column is
    called plone.leftcolumn; the right column is called
    plone.rightcolumn.

    :returns: dict with keys "one", "content" and "two" mapping to
        bootstrap grid CSS classes (empty string when unused)
    """
    plone_view = getMultiAdapter(
        (self.context, self.request), name=u'plone')
    portal_state = getMultiAdapter(
        (self.context, self.request), name=u'plone_portal_state')

    sl = plone_view.have_portlets('plone.leftcolumn', view=view)
    sr = plone_view.have_portlets('plone.rightcolumn', view=view)
    isRTL = portal_state.is_rtl()

    # pre-fill dictionary
    columns = dict(one="", content="", two="")

    if not sl and not sr:
        # we don't have columns, thus content takes the whole width
        columns['content'] = "col-md-12"
    elif sl and sr:
        # In case we have both columns, content takes 50% of the whole
        # width and the rest 50% is spread between the columns
        columns['one'] = "col-xs-12 col-md-2"
        columns['content'] = "col-xs-12 col-md-8"
        columns['two'] = "col-xs-12 col-md-2"
    elif sr and not sl and not isRTL:
        # We have right column and we are NOT in RTL language
        columns['content'] = "col-xs-12 col-md-10"
        columns['two'] = "col-xs-12 col-md-2"
    elif sl and not sr:
        # We have only the left column. The two original branches for
        # RTL and LTR had identical bodies, so they are merged here.
        columns['one'] = "col-xs-12 col-md-2"
        columns['content'] = "col-xs-12 col-md-10"
    # NOTE(review): a right-column-only layout in an RTL language falls
    # through all branches and leaves every class empty -- confirm that
    # this is intended.
    return columns
"def",
"getColumnsClasses",
"(",
"self",
",",
"view",
"=",
"None",
")",
":",
"plone_view",
"=",
"getMultiAdapter",
"(",
"(",
"self",
".",
"context",
",",
"self",
".",
"request",
")",
",",
"name",
"=",
"u'plone'",
")",
"portal_state",
"=",
"getMultiAdapter"... | Determine whether a column should be shown. The left column is
called plone.leftcolumn; the right column is called
plone.rightcolumn. | [
"Determine",
"whether",
"a",
"column",
"should",
"be",
"shown",
".",
"The",
"left",
"column",
"is",
"called",
"plone",
".",
"leftcolumn",
";",
"the",
"right",
"column",
"is",
"called",
"plone",
".",
"rightcolumn",
"."
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/bootstrap/views.py#L93-L142 | train | 208,434 |
def setupitems(self):
    """Lookup available setup items

    Searches the direct children of the setup folder, drops items that
    are excluded from navigation and sorts the rest by translated title.

    :returns: catalog brains
    """
    query = {
        "path": {
            "query": api.get_path(self.setup),
            "depth": 1,
        },
    }
    items = api.search(query, "portal_catalog")
    # filter out items that are excluded from the navigation
    items = filter(lambda item: not item.exclude_from_nav, items)
    # sort by (translated) title. A `key` function replaces the former
    # `cmp` comparator: `cmp` and `sorted(cmp=...)` were removed in
    # Python 3, and sorting by the translated title key yields the same
    # ordering.
    return sorted(items, key=lambda brain: t(api.get_title(brain)))
"def",
"setupitems",
"(",
"self",
")",
":",
"query",
"=",
"{",
"\"path\"",
":",
"{",
"\"query\"",
":",
"api",
".",
"get_path",
"(",
"self",
".",
"setup",
")",
",",
"\"depth\"",
":",
"1",
",",
"}",
",",
"}",
"items",
"=",
"api",
".",
"search",
"("... | Lookup available setup items
:returns: catalog brains | [
"Lookup",
"available",
"setup",
"items"
] | 3c7fc7b462321fb354c478c19b5c20f3014fa398 | https://github.com/senaite/senaite.lims/blob/3c7fc7b462321fb354c478c19b5c20f3014fa398/src/senaite/lims/browser/controlpanel/views/setupview.py#L83-L104 | train | 208,435 |
def content_type(self):
    """Returns the content-type value determined by file extension.

    The result is memoized on the instance as ``_content_type`` so the
    extension is only inspected once.
    """
    if hasattr(self, '_content_type'):
        return self._content_type
    # only the extension matters; the basename returned by splitext
    # was previously bound to an unused local
    extension = os.path.splitext(self._file_path)[1]
    if extension == '.csv':
        self._content_type = 'text/csv'
    elif extension == '.tsv':
        self._content_type = 'text/tab-separated-values'
    else:
        self._content_type = 'text/plain'
    return self._content_type
"def",
"content_type",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_content_type'",
")",
":",
"return",
"self",
".",
"_content_type",
"filename",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"self",
".",
"_file_path",
"... | Returns the content-type value determined by file extension. | [
"Returns",
"the",
"content",
"-",
"type",
"value",
"determined",
"by",
"file",
"extension",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/http.py#L226-L240 | train | 208,436 |
def perform(self):
    """Executes the current TONUpload object.

    Small files are uploaded in a single request; larger files are
    uploaded in chunks whose size adapts to the server response time.

    :returns: the location of the uploaded resource
    """
    if self._file_size < self._SINGLE_UPLOAD_MAX:
        # small file: upload in one request. Use a context manager so
        # the file handle is closed (previously leaked).
        resource = "{0}{1}".format(self._DEFAULT_RESOURCE, self.bucket)
        with open(self._file_path, 'rb') as f:
            data = f.read()
        response = self.__upload(resource, data)
        return response.headers['location']
    else:
        response = self.__init_chunked_upload()
        min_chunk_size = int(response.headers['x-ton-min-chunk-size'])
        chunk_size = min_chunk_size * self._DEFAULT_CHUNK_SIZE
        location = response.headers['location']
        bytes_read = 0
        # `with` guarantees the file is closed even if a chunk upload
        # raises (the previous explicit close() was skipped on error)
        with open(self._file_path, 'rb') as f:
            while True:
                bytes = f.read(chunk_size)
                if not bytes:
                    break
                bytes_start = bytes_read
                bytes_read += len(bytes)
                response = self.__upload_chunk(
                    location, chunk_size, bytes, bytes_start, bytes_read)
                # adapt the next chunk size to the measured response time
                response_time = int(response.headers['x-response-time'])
                chunk_size = min_chunk_size * size(
                    self._DEFAULT_CHUNK_SIZE, self._RESPONSE_TIME_MAX,
                    response_time)
        return location.split("?")[0]
"def",
"perform",
"(",
"self",
")",
":",
"if",
"self",
".",
"_file_size",
"<",
"self",
".",
"_SINGLE_UPLOAD_MAX",
":",
"resource",
"=",
"\"{0}{1}\"",
".",
"format",
"(",
"self",
".",
"_DEFAULT_RESOURCE",
",",
"self",
".",
"bucket",
")",
"response",
"=",
... | Executes the current TONUpload object. | [
"Executes",
"the",
"current",
"TONUpload",
"object",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/http.py#L242-L270 | train | 208,437 |
def __upload(self, resource, bytes):
    """Performs a single chunk upload."""
    # note: string conversion required here due to open encoding bug in requests-oauthlib.
    expires = self.options.get('x-ton-expires', self._DEFAULT_EXPIRE)
    headers = {
        'x-ton-expires': http_time(expires),
        'content-length': str(self._file_size),
        'content-type': self.content_type
    }
    request = Request(self._client, 'post', resource,
                      domain=self._DEFAULT_DOMAIN, headers=headers, body=bytes)
    return request.perform()
"def",
"__upload",
"(",
"self",
",",
"resource",
",",
"bytes",
")",
":",
"# note: string conversion required here due to open encoding bug in requests-oauthlib.",
"headers",
"=",
"{",
"'x-ton-expires'",
":",
"http_time",
"(",
"self",
".",
"options",
".",
"get",
"(",
"... | Performs a single chunk upload. | [
"Performs",
"a",
"single",
"chunk",
"upload",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/http.py#L280-L291 | train | 208,438 |
def __init_chunked_upload(self):
    """Initialization for a multi-chunk upload."""
    # note: string conversion required here due to open encoding bug in requests-oauthlib.
    expires = self.options.get('x-ton-expires', self._DEFAULT_EXPIRE)
    headers = {
        'x-ton-content-type': self.content_type,
        'x-ton-content-length': str(self._file_size),
        'x-ton-expires': http_time(expires),
        'content-length': str(0),
        'content-type': self.content_type
    }
    resource = "{0}{1}?resumable=true".format(
        self._DEFAULT_RESOURCE, self._DEFAULT_BUCKET)
    request = Request(self._client, 'post', resource,
                      domain=self._DEFAULT_DOMAIN, headers=headers)
    return request.perform()
"def",
"__init_chunked_upload",
"(",
"self",
")",
":",
"# note: string conversion required here due to open encoding bug in requests-oauthlib.",
"headers",
"=",
"{",
"'x-ton-content-type'",
":",
"self",
".",
"content_type",
",",
"'x-ton-content-length'",
":",
"str",
"(",
"sel... | Initialization for a multi-chunk upload. | [
"Initialization",
"for",
"a",
"multi",
"-",
"chunk",
"upload",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/http.py#L293-L308 | train | 208,439 |
def __upload_chunk(self, resource, chunk_size, bytes, bytes_start, bytes_read):
    """Uploads a single chunk of a multi-chunk upload."""
    # note: string conversion required here due to open encoding bug in requests-oauthlib.
    remaining = self._file_size - bytes_read
    content_range = "bytes {0}-{1}/{2}".format(
        bytes_start, bytes_read - 1, self._file_size)
    headers = {
        'content-type': self.content_type,
        'content-length': str(min([chunk_size, remaining])),
        'content-range': content_range
    }
    request = Request(self._client, 'put', resource,
                      domain=self._DEFAULT_DOMAIN, headers=headers, body=bytes)
    return request.perform()
"def",
"__upload_chunk",
"(",
"self",
",",
"resource",
",",
"chunk_size",
",",
"bytes",
",",
"bytes_start",
",",
"bytes_read",
")",
":",
"# note: string conversion required here due to open encoding bug in requests-oauthlib.",
"headers",
"=",
"{",
"'content-type'",
":",
"... | Uploads a single chunk of a multi-chunk upload. | [
"Uploads",
"a",
"single",
"chunk",
"of",
"a",
"multi",
"-",
"chunk",
"upload",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/http.py#L310-L322 | train | 208,440 |
def next(self):
    """Returns the next item in the cursor."""
    # iterative form of the original tail-recursion: keep fetching
    # pages until an item is available or the cursor is exhausted
    while True:
        if self._current_index < len(self._collection):
            item = self._collection[self._current_index]
            self._current_index += 1
            return item
        if not self._next_cursor:
            self._current_index = 0
            raise StopIteration
        self.__fetch_next()
"def",
"next",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current_index",
"<",
"len",
"(",
"self",
".",
"_collection",
")",
":",
"value",
"=",
"self",
".",
"_collection",
"[",
"self",
".",
"_current_index",
"]",
"self",
".",
"_current_index",
"+=",
"1... | Returns the next item in the cursor. | [
"Returns",
"the",
"next",
"item",
"in",
"the",
"cursor",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/cursor.py#L62-L73 | train | 208,441 |
def save(self):
    """
    Saves or updates the current object instance depending on the
    presence of `object.id`.
    """
    params = self.to_params()
    # the collection endpoint expects a list under `tweet_ids`
    if 'tweet_id' in params:
        params['tweet_ids'] = [params.pop('tweet_id')]
    if self.id:
        raise HTTPError("Method PUT not allowed.")
    resource = self.RESOURCE_COLLECTION.format(account_id=self.account.id)
    response = Request(
        self.account.client, 'post', resource, params=params).perform()
    return self.from_response(response.body['data'][0])
"def",
"save",
"(",
"self",
")",
":",
"params",
"=",
"self",
".",
"to_params",
"(",
")",
"if",
"'tweet_id'",
"in",
"params",
":",
"params",
"[",
"'tweet_ids'",
"]",
"=",
"[",
"params",
"[",
"'tweet_id'",
"]",
"]",
"del",
"params",
"[",
"'tweet_id'",
... | Saves or updates the current object instance depending on the
presence of `object.id`. | [
"Saves",
"or",
"updates",
"the",
"current",
"object",
"instance",
"depending",
"on",
"the",
"presence",
"of",
"object",
".",
"id",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/creative.py#L42-L57 | train | 208,442 |
def preview(self):
    """
    Returns an HTML preview for a Scheduled Tweet.
    """
    # without an id there is nothing to preview (implicit None before)
    if not self.id:
        return None
    resource = self.PREVIEW.format(account_id=self.account.id, id=self.id)
    response = Request(self.account.client, 'get', resource).perform()
    return response.body['data']
"def",
"preview",
"(",
"self",
")",
":",
"if",
"self",
".",
"id",
":",
"resource",
"=",
"self",
".",
"PREVIEW",
"resource",
"=",
"resource",
".",
"format",
"(",
"account_id",
"=",
"self",
".",
"account",
".",
"id",
",",
"id",
"=",
"self",
".",
"id"... | Returns an HTML preview for a Scheduled Tweet. | [
"Returns",
"an",
"HTML",
"preview",
"for",
"a",
"Scheduled",
"Tweet",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/creative.py#L327-L335 | train | 208,443 |
def load(klass, client, id, **kwargs):
    """Returns an object instance for a given resource."""
    response = Request(
        client, 'get', klass.RESOURCE.format(id=id), params=kwargs).perform()
    instance = klass(client)
    return instance.from_response(response.body['data'])
"def",
"load",
"(",
"klass",
",",
"client",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"resource",
"=",
"klass",
".",
"RESOURCE",
".",
"format",
"(",
"id",
"=",
"id",
")",
"response",
"=",
"Request",
"(",
"client",
",",
"'get'",
",",
"resource",... | Returns an object instance for a given resource. | [
"Returns",
"an",
"object",
"instance",
"for",
"a",
"given",
"resource",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/account.py#L44-L48 | train | 208,444 |
def all(klass, client, **kwargs):
    """Returns a Cursor instance for a given resource."""
    request = Request(
        client, 'get', klass.RESOURCE_COLLECTION, params=kwargs)
    return Cursor(klass, request, init_with=[client])
"def",
"all",
"(",
"klass",
",",
"client",
",",
"*",
"*",
"kwargs",
")",
":",
"resource",
"=",
"klass",
".",
"RESOURCE_COLLECTION",
"request",
"=",
"Request",
"(",
"client",
",",
"'get'",
",",
"resource",
",",
"params",
"=",
"kwargs",
")",
"return",
"C... | Returns a Cursor instance for a given resource. | [
"Returns",
"a",
"Cursor",
"instance",
"for",
"a",
"given",
"resource",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/account.py#L51-L55 | train | 208,445 |
def features(self):
    """
    Returns a collection of features available to the current account.
    """
    self._validate_loaded()
    response = Request(
        self.client, 'get', self.FEATURES.format(id=self.id)).perform()
    return response.body['data']
"def",
"features",
"(",
"self",
")",
":",
"self",
".",
"_validate_loaded",
"(",
")",
"resource",
"=",
"self",
".",
"FEATURES",
".",
"format",
"(",
"id",
"=",
"self",
".",
"id",
")",
"response",
"=",
"Request",
"(",
"self",
".",
"client",
",",
"'get'"... | Returns a collection of features available to the current account. | [
"Returns",
"a",
"collection",
"of",
"features",
"available",
"to",
"the",
"current",
"account",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/account.py#L72-L81 | train | 208,446 |
def scoped_timeline(self, *id, **kwargs):
    """
    Returns the most recent promotable Tweets created by the specified Twitter user.
    """
    self._validate_loaded()
    # keyword arguments may override the user_id entry, as before
    params = {'user_id': id}
    params.update(kwargs)
    response = Request(
        self.client, 'get', self.SCOPED_TIMELINE.format(id=self.id),
        params=params).perform()
    return response.body['data']
"def",
"scoped_timeline",
"(",
"self",
",",
"*",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_validate_loaded",
"(",
")",
"params",
"=",
"{",
"'user_id'",
":",
"id",
"}",
"params",
".",
"update",
"(",
"kwargs",
")",
"resource",
"=",
"self"... | Returns the most recent promotable Tweets created by the specified Twitter user. | [
"Returns",
"the",
"most",
"recent",
"promotable",
"Tweets",
"created",
"by",
"the",
"specified",
"Twitter",
"user",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/account.py#L158-L170 | train | 208,447 |
def get_version():
    """Returns a string representation of the current SDK version."""
    parts = [str(part) for part in VERSION]
    # a trailing string component (e.g. "rc1") is appended without a dot
    if isinstance(VERSION[-1], str):
        return '.'.join(parts[:-1]) + parts[-1]
    return '.'.join(parts)
"def",
"get_version",
"(",
")",
":",
"if",
"isinstance",
"(",
"VERSION",
"[",
"-",
"1",
"]",
",",
"str",
")",
":",
"return",
"'.'",
".",
"join",
"(",
"map",
"(",
"str",
",",
"VERSION",
"[",
":",
"-",
"1",
"]",
")",
")",
"+",
"VERSION",
"[",
"... | Returns a string representation of the current SDK version. | [
"Returns",
"a",
"string",
"representation",
"of",
"the",
"current",
"SDK",
"version",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/utils.py#L14-L18 | train | 208,448 |
def to_time(time, granularity):
    """Returns a truncated and rounded time string based on the specified granularity."""
    if not granularity:
        # no granularity given: dates keep their date format, anything
        # else is rendered as a full timestamp
        if type(time) is datetime.date:
            return format_date(time)
        return format_time(time)
    if granularity == GRANULARITY.HOUR:
        return format_time(remove_minutes(time))
    if granularity == GRANULARITY.DAY:
        return format_date(remove_hours(time))
    return format_time(time)
"def",
"to_time",
"(",
"time",
",",
"granularity",
")",
":",
"if",
"not",
"granularity",
":",
"if",
"type",
"(",
"time",
")",
"is",
"datetime",
".",
"date",
":",
"return",
"format_date",
"(",
"time",
")",
"else",
":",
"return",
"format_time",
"(",
"tim... | Returns a truncated and rounded time string based on the specified granularity. | [
"Returns",
"a",
"truncated",
"and",
"rounded",
"time",
"string",
"based",
"on",
"the",
"specified",
"granularity",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/utils.py#L31-L43 | train | 208,449 |
twitterdev/twitter-python-ads-sdk | twitter_ads/utils.py | http_time | def http_time(time):
"""Formats a datetime as an RFC 1123 compliant string."""
return formatdate(timeval=mktime(time.timetuple()), localtime=False, usegmt=True) | python | def http_time(time):
"""Formats a datetime as an RFC 1123 compliant string."""
return formatdate(timeval=mktime(time.timetuple()), localtime=False, usegmt=True) | [
"def",
"http_time",
"(",
"time",
")",
":",
"return",
"formatdate",
"(",
"timeval",
"=",
"mktime",
"(",
"time",
".",
"timetuple",
"(",
")",
")",
",",
"localtime",
"=",
"False",
",",
"usegmt",
"=",
"True",
")"
] | Formats a datetime as an RFC 1123 compliant string. | [
"Formats",
"a",
"datetime",
"as",
"an",
"RFC",
"1123",
"compliant",
"string",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/utils.py#L56-L58 | train | 208,450 |
def size(default_chunk_size, response_time_max, response_time_actual):
    """Determines the chunk size based on response times."""
    # avoid dividing by zero when the server responded instantly
    if response_time_actual == 0:
        response_time_actual = 1
    ratio = response_time_actual / response_time_max
    scaled = int(default_chunk_size * (1 / ratio))
    # clamp the result between 1 and the default chunk size
    return min(max(scaled, 1), default_chunk_size)
"def",
"size",
"(",
"default_chunk_size",
",",
"response_time_max",
",",
"response_time_actual",
")",
":",
"if",
"response_time_actual",
"==",
"0",
":",
"response_time_actual",
"=",
"1",
"scale",
"=",
"1",
"/",
"(",
"response_time_actual",
"/",
"response_time_max",
... | Determines the chunk size based on response times. | [
"Determines",
"the",
"chunk",
"size",
"based",
"on",
"response",
"times",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/utils.py#L61-L67 | train | 208,451 |
def sandbox():
    """Enables and disables sandbox mode."""
    def fget(self):
        return self._options.get('sandbox', None)

    def fset(self, value):
        self._options['sandbox'] = value

    # explicit property kwargs (equivalent to the locals() idiom)
    return {'fget': fget, 'fset': fset}
"def",
"sandbox",
"(",
")",
":",
"def",
"fget",
"(",
"self",
")",
":",
"return",
"self",
".",
"_options",
".",
"get",
"(",
"'sandbox'",
",",
"None",
")",
"def",
"fset",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_options",
"[",
"'sandbox'",... | Enables and disables sandbox mode. | [
"Enables",
"and",
"disables",
"sandbox",
"mode",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/client.py#L60-L68 | train | 208,452 |
twitterdev/twitter-python-ads-sdk | twitter_ads/client.py | Client.trace | def trace():
"""Enables and disables request tracing."""
def fget(self):
return self._options.get('trace', None)
def fset(self, value):
self._options['trace'] = value
return locals() | python | def trace():
"""Enables and disables request tracing."""
def fget(self):
return self._options.get('trace', None)
def fset(self, value):
self._options['trace'] = value
return locals() | [
"def",
"trace",
"(",
")",
":",
"def",
"fget",
"(",
"self",
")",
":",
"return",
"self",
".",
"_options",
".",
"get",
"(",
"'trace'",
",",
"None",
")",
"def",
"fset",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_options",
"[",
"'trace'",
"]"... | Enables and disables request tracing. | [
"Enables",
"and",
"disables",
"request",
"tracing",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/client.py#L72-L80 | train | 208,453 |
twitterdev/twitter-python-ads-sdk | twitter_ads/campaign.py | TargetingCriteria.platform_versions | def platform_versions(klass, account, **kwargs):
"""Returns a list of supported platform versions"""
resource = klass.RESOURCE_OPTIONS + 'platform_versions'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request) | python | def platform_versions(klass, account, **kwargs):
"""Returns a list of supported platform versions"""
resource = klass.RESOURCE_OPTIONS + 'platform_versions'
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(None, request) | [
"def",
"platform_versions",
"(",
"klass",
",",
"account",
",",
"*",
"*",
"kwargs",
")",
":",
"resource",
"=",
"klass",
".",
"RESOURCE_OPTIONS",
"+",
"'platform_versions'",
"request",
"=",
"Request",
"(",
"account",
".",
"client",
",",
"'get'",
",",
"resource... | Returns a list of supported platform versions | [
"Returns",
"a",
"list",
"of",
"supported",
"platform",
"versions"
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/campaign.py#L104-L108 | train | 208,454 |
twitterdev/twitter-python-ads-sdk | twitter_ads/campaign.py | LineItem.targeting_criteria | def targeting_criteria(self, id=None, **kwargs):
"""
Returns a collection of targeting criteria available to the
current line item.
"""
self._validate_loaded()
if id is None:
return TargetingCriteria.all(self.account, self.id, **kwargs)
else:
return TargetingCriteria.load(self.account, id, **kwargs) | python | def targeting_criteria(self, id=None, **kwargs):
"""
Returns a collection of targeting criteria available to the
current line item.
"""
self._validate_loaded()
if id is None:
return TargetingCriteria.all(self.account, self.id, **kwargs)
else:
return TargetingCriteria.load(self.account, id, **kwargs) | [
"def",
"targeting_criteria",
"(",
"self",
",",
"id",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_validate_loaded",
"(",
")",
"if",
"id",
"is",
"None",
":",
"return",
"TargetingCriteria",
".",
"all",
"(",
"self",
".",
"account",
",",
... | Returns a collection of targeting criteria available to the
current line item. | [
"Returns",
"a",
"collection",
"of",
"targeting",
"criteria",
"available",
"to",
"the",
"current",
"line",
"item",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/campaign.py#L262-L271 | train | 208,455 |
twitterdev/twitter-python-ads-sdk | twitter_ads/campaign.py | Tweet.preview | def preview(klass, account, **kwargs):
"""
Returns an HTML preview of a tweet, either new or existing.
"""
params = {}
params.update(kwargs)
# handles array to string conversion for media IDs
if 'media_ids' in params and isinstance(params['media_ids'], list):
params['media_ids'] = ','.join(map(str, params['media_ids']))
resource = klass.TWEET_ID_PREVIEW if params.get('id') else klass.TWEET_PREVIEW
resource = resource.format(account_id=account.id, id=params.get('id'))
response = Request(account.client, 'get', resource, params=params).perform()
return response.body['data'] | python | def preview(klass, account, **kwargs):
"""
Returns an HTML preview of a tweet, either new or existing.
"""
params = {}
params.update(kwargs)
# handles array to string conversion for media IDs
if 'media_ids' in params and isinstance(params['media_ids'], list):
params['media_ids'] = ','.join(map(str, params['media_ids']))
resource = klass.TWEET_ID_PREVIEW if params.get('id') else klass.TWEET_PREVIEW
resource = resource.format(account_id=account.id, id=params.get('id'))
response = Request(account.client, 'get', resource, params=params).perform()
return response.body['data'] | [
"def",
"preview",
"(",
"klass",
",",
"account",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"}",
"params",
".",
"update",
"(",
"kwargs",
")",
"# handles array to string conversion for media IDs",
"if",
"'media_ids'",
"in",
"params",
"and",
"isinstan... | Returns an HTML preview of a tweet, either new or existing. | [
"Returns",
"an",
"HTML",
"preview",
"of",
"a",
"tweet",
"either",
"new",
"or",
"existing",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/campaign.py#L338-L352 | train | 208,456 |
twitterdev/twitter-python-ads-sdk | twitter_ads/campaign.py | Tweet.create | def create(klass, account, **kwargs):
"""
Creates a "Promoted-Only" Tweet using the specialized Ads API end point.
"""
params = {}
params.update(kwargs)
# handles array to string conversion for media IDs
if 'media_ids' in params and isinstance(params['media_ids'], list):
params['media_ids'] = ','.join(map(str, params['media_ids']))
resource = klass.TWEET_CREATE.format(account_id=account.id)
response = Request(account.client, 'post', resource, params=params).perform()
return response.body['data'] | python | def create(klass, account, **kwargs):
"""
Creates a "Promoted-Only" Tweet using the specialized Ads API end point.
"""
params = {}
params.update(kwargs)
# handles array to string conversion for media IDs
if 'media_ids' in params and isinstance(params['media_ids'], list):
params['media_ids'] = ','.join(map(str, params['media_ids']))
resource = klass.TWEET_CREATE.format(account_id=account.id)
response = Request(account.client, 'post', resource, params=params).perform()
return response.body['data'] | [
"def",
"create",
"(",
"klass",
",",
"account",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"}",
"params",
".",
"update",
"(",
"kwargs",
")",
"# handles array to string conversion for media IDs",
"if",
"'media_ids'",
"in",
"params",
"and",
"isinstanc... | Creates a "Promoted-Only" Tweet using the specialized Ads API end point. | [
"Creates",
"a",
"Promoted",
"-",
"Only",
"Tweet",
"using",
"the",
"specialized",
"Ads",
"API",
"end",
"point",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/campaign.py#L355-L368 | train | 208,457 |
twitterdev/twitter-python-ads-sdk | twitter_ads/resource.py | resource_property | def resource_property(klass, name, **kwargs):
"""Builds a resource object property."""
klass.PROPERTIES[name] = kwargs
def getter(self):
return getattr(self, '_%s' % name, kwargs.get('default', None))
if kwargs.get('readonly', False):
setattr(klass, name, property(getter))
else:
def setter(self, value):
setattr(self, '_%s' % name, value)
setattr(klass, name, property(getter, setter)) | python | def resource_property(klass, name, **kwargs):
"""Builds a resource object property."""
klass.PROPERTIES[name] = kwargs
def getter(self):
return getattr(self, '_%s' % name, kwargs.get('default', None))
if kwargs.get('readonly', False):
setattr(klass, name, property(getter))
else:
def setter(self, value):
setattr(self, '_%s' % name, value)
setattr(klass, name, property(getter, setter)) | [
"def",
"resource_property",
"(",
"klass",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"klass",
".",
"PROPERTIES",
"[",
"name",
"]",
"=",
"kwargs",
"def",
"getter",
"(",
"self",
")",
":",
"return",
"getattr",
"(",
"self",
",",
"'_%s'",
"%",
"name"... | Builds a resource object property. | [
"Builds",
"a",
"resource",
"object",
"property",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/resource.py#L20-L32 | train | 208,458 |
twitterdev/twitter-python-ads-sdk | twitter_ads/resource.py | Resource.from_response | def from_response(self, response):
"""
Populates a given objects attributes from a parsed JSON API response.
This helper handles all necessary type coercions as it assigns
attribute values.
"""
for name in self.PROPERTIES:
attr = '_{0}'.format(name)
transform = self.PROPERTIES[name].get('transform', None)
value = response.get(name, None)
if transform and transform == TRANSFORM.TIME and value:
setattr(self, attr, dateutil.parser.parse(value))
if isinstance(value, int) and value == 0:
continue # skip attribute
else:
setattr(self, attr, value)
return self | python | def from_response(self, response):
"""
Populates a given objects attributes from a parsed JSON API response.
This helper handles all necessary type coercions as it assigns
attribute values.
"""
for name in self.PROPERTIES:
attr = '_{0}'.format(name)
transform = self.PROPERTIES[name].get('transform', None)
value = response.get(name, None)
if transform and transform == TRANSFORM.TIME and value:
setattr(self, attr, dateutil.parser.parse(value))
if isinstance(value, int) and value == 0:
continue # skip attribute
else:
setattr(self, attr, value)
return self | [
"def",
"from_response",
"(",
"self",
",",
"response",
")",
":",
"for",
"name",
"in",
"self",
".",
"PROPERTIES",
":",
"attr",
"=",
"'_{0}'",
".",
"format",
"(",
"name",
")",
"transform",
"=",
"self",
".",
"PROPERTIES",
"[",
"name",
"]",
".",
"get",
"(... | Populates a given objects attributes from a parsed JSON API response.
This helper handles all necessary type coercions as it assigns
attribute values. | [
"Populates",
"a",
"given",
"objects",
"attributes",
"from",
"a",
"parsed",
"JSON",
"API",
"response",
".",
"This",
"helper",
"handles",
"all",
"necessary",
"type",
"coercions",
"as",
"it",
"assigns",
"attribute",
"values",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/resource.py#L45-L62 | train | 208,459 |
twitterdev/twitter-python-ads-sdk | twitter_ads/resource.py | Resource.to_params | def to_params(self):
"""
Generates a Hash of property values for the current object. This helper
handles all necessary type coercions as it generates its output.
"""
params = {}
for name in self.PROPERTIES:
attr = '_{0}'.format(name)
value = getattr(self, attr, None) or getattr(self, name, None)
# skip attribute
if value is None:
continue
if isinstance(value, datetime):
params[name] = format_time(value)
elif isinstance(value, list):
params[name] = ','.join(map(str, value))
elif isinstance(value, bool):
params[name] = str(value).lower()
else:
params[name] = value
return params | python | def to_params(self):
"""
Generates a Hash of property values for the current object. This helper
handles all necessary type coercions as it generates its output.
"""
params = {}
for name in self.PROPERTIES:
attr = '_{0}'.format(name)
value = getattr(self, attr, None) or getattr(self, name, None)
# skip attribute
if value is None:
continue
if isinstance(value, datetime):
params[name] = format_time(value)
elif isinstance(value, list):
params[name] = ','.join(map(str, value))
elif isinstance(value, bool):
params[name] = str(value).lower()
else:
params[name] = value
return params | [
"def",
"to_params",
"(",
"self",
")",
":",
"params",
"=",
"{",
"}",
"for",
"name",
"in",
"self",
".",
"PROPERTIES",
":",
"attr",
"=",
"'_{0}'",
".",
"format",
"(",
"name",
")",
"value",
"=",
"getattr",
"(",
"self",
",",
"attr",
",",
"None",
")",
... | Generates a Hash of property values for the current object. This helper
handles all necessary type coercions as it generates its output. | [
"Generates",
"a",
"Hash",
"of",
"property",
"values",
"for",
"the",
"current",
"object",
".",
"This",
"helper",
"handles",
"all",
"necessary",
"type",
"coercions",
"as",
"it",
"generates",
"its",
"output",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/resource.py#L64-L87 | train | 208,460 |
twitterdev/twitter-python-ads-sdk | twitter_ads/resource.py | Analytics.stats | def stats(self, metrics, **kwargs): # noqa
"""
Pulls a list of metrics for the current object instance.
"""
return self.__class__.all_stats(self.account, [self.id], metrics, **kwargs) | python | def stats(self, metrics, **kwargs): # noqa
"""
Pulls a list of metrics for the current object instance.
"""
return self.__class__.all_stats(self.account, [self.id], metrics, **kwargs) | [
"def",
"stats",
"(",
"self",
",",
"metrics",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa",
"return",
"self",
".",
"__class__",
".",
"all_stats",
"(",
"self",
".",
"account",
",",
"[",
"self",
".",
"id",
"]",
",",
"metrics",
",",
"*",
"*",
"kwargs",
... | Pulls a list of metrics for the current object instance. | [
"Pulls",
"a",
"list",
"of",
"metrics",
"for",
"the",
"current",
"object",
"instance",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/resource.py#L232-L236 | train | 208,461 |
twitterdev/twitter-python-ads-sdk | twitter_ads/resource.py | Analytics._standard_params | def _standard_params(klass, ids, metric_groups, **kwargs):
"""
Sets the standard params for a stats request
"""
end_time = kwargs.get('end_time', datetime.utcnow())
start_time = kwargs.get('start_time', end_time - timedelta(seconds=604800))
granularity = kwargs.get('granularity', GRANULARITY.HOUR)
placement = kwargs.get('placement', PLACEMENT.ALL_ON_TWITTER)
params = {
'metric_groups': ','.join(metric_groups),
'start_time': to_time(start_time, granularity),
'end_time': to_time(end_time, granularity),
'granularity': granularity.upper(),
'entity': klass.ANALYTICS_MAP[klass.__name__],
'placement': placement
}
params['entity_ids'] = ','.join(ids)
return params | python | def _standard_params(klass, ids, metric_groups, **kwargs):
"""
Sets the standard params for a stats request
"""
end_time = kwargs.get('end_time', datetime.utcnow())
start_time = kwargs.get('start_time', end_time - timedelta(seconds=604800))
granularity = kwargs.get('granularity', GRANULARITY.HOUR)
placement = kwargs.get('placement', PLACEMENT.ALL_ON_TWITTER)
params = {
'metric_groups': ','.join(metric_groups),
'start_time': to_time(start_time, granularity),
'end_time': to_time(end_time, granularity),
'granularity': granularity.upper(),
'entity': klass.ANALYTICS_MAP[klass.__name__],
'placement': placement
}
params['entity_ids'] = ','.join(ids)
return params | [
"def",
"_standard_params",
"(",
"klass",
",",
"ids",
",",
"metric_groups",
",",
"*",
"*",
"kwargs",
")",
":",
"end_time",
"=",
"kwargs",
".",
"get",
"(",
"'end_time'",
",",
"datetime",
".",
"utcnow",
"(",
")",
")",
"start_time",
"=",
"kwargs",
".",
"ge... | Sets the standard params for a stats request | [
"Sets",
"the",
"standard",
"params",
"for",
"a",
"stats",
"request"
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/resource.py#L239-L259 | train | 208,462 |
twitterdev/twitter-python-ads-sdk | twitter_ads/resource.py | Analytics.all_stats | def all_stats(klass, account, ids, metric_groups, **kwargs):
"""
Pulls a list of metrics for a specified set of object IDs.
"""
params = klass._standard_params(ids, metric_groups, **kwargs)
resource = klass.RESOURCE_SYNC.format(account_id=account.id)
response = Request(account.client, 'get', resource, params=params).perform()
return response.body['data'] | python | def all_stats(klass, account, ids, metric_groups, **kwargs):
"""
Pulls a list of metrics for a specified set of object IDs.
"""
params = klass._standard_params(ids, metric_groups, **kwargs)
resource = klass.RESOURCE_SYNC.format(account_id=account.id)
response = Request(account.client, 'get', resource, params=params).perform()
return response.body['data'] | [
"def",
"all_stats",
"(",
"klass",
",",
"account",
",",
"ids",
",",
"metric_groups",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"klass",
".",
"_standard_params",
"(",
"ids",
",",
"metric_groups",
",",
"*",
"*",
"kwargs",
")",
"resource",
"=",
"kl... | Pulls a list of metrics for a specified set of object IDs. | [
"Pulls",
"a",
"list",
"of",
"metrics",
"for",
"a",
"specified",
"set",
"of",
"object",
"IDs",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/resource.py#L262-L270 | train | 208,463 |
twitterdev/twitter-python-ads-sdk | twitter_ads/resource.py | Analytics.queue_async_stats_job | def queue_async_stats_job(klass, account, ids, metric_groups, **kwargs):
"""
Queues a list of metrics for a specified set of object IDs asynchronously
"""
params = klass._standard_params(ids, metric_groups, **kwargs)
params['platform'] = kwargs.get('platform', None)
params['country'] = kwargs.get('country', None)
params['segmentation_type'] = kwargs.get('segmentation_type', None)
resource = klass.RESOURCE_ASYNC.format(account_id=account.id)
response = Request(account.client, 'post', resource, params=params).perform()
return response.body['data'] | python | def queue_async_stats_job(klass, account, ids, metric_groups, **kwargs):
"""
Queues a list of metrics for a specified set of object IDs asynchronously
"""
params = klass._standard_params(ids, metric_groups, **kwargs)
params['platform'] = kwargs.get('platform', None)
params['country'] = kwargs.get('country', None)
params['segmentation_type'] = kwargs.get('segmentation_type', None)
resource = klass.RESOURCE_ASYNC.format(account_id=account.id)
response = Request(account.client, 'post', resource, params=params).perform()
return response.body['data'] | [
"def",
"queue_async_stats_job",
"(",
"klass",
",",
"account",
",",
"ids",
",",
"metric_groups",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"klass",
".",
"_standard_params",
"(",
"ids",
",",
"metric_groups",
",",
"*",
"*",
"kwargs",
")",
"params",
... | Queues a list of metrics for a specified set of object IDs asynchronously | [
"Queues",
"a",
"list",
"of",
"metrics",
"for",
"a",
"specified",
"set",
"of",
"object",
"IDs",
"asynchronously"
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/resource.py#L273-L285 | train | 208,464 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | TailoredAudience.create | def create(klass, account, name):
"""
Creates a new tailored audience.
"""
audience = klass(account)
getattr(audience, '__create_audience__')(name)
try:
return audience.reload()
except BadRequest as e:
audience.delete()
raise e | python | def create(klass, account, name):
"""
Creates a new tailored audience.
"""
audience = klass(account)
getattr(audience, '__create_audience__')(name)
try:
return audience.reload()
except BadRequest as e:
audience.delete()
raise e | [
"def",
"create",
"(",
"klass",
",",
"account",
",",
"name",
")",
":",
"audience",
"=",
"klass",
"(",
"account",
")",
"getattr",
"(",
"audience",
",",
"'__create_audience__'",
")",
"(",
"name",
")",
"try",
":",
"return",
"audience",
".",
"reload",
"(",
... | Creates a new tailored audience. | [
"Creates",
"a",
"new",
"tailored",
"audience",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L26-L36 | train | 208,465 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | TailoredAudience.users | def users(self, params):
"""
This is a private API and requires whitelisting from Twitter.
This endpoint will allow partners to add, update and remove users from a given
tailored_audience_id.
The endpoint will also accept multiple user identifier types per user as well.
"""
resource = self.RESOURCE_USERS.format(account_id=self.account.id, id=self.id)
headers = {'Content-Type': 'application/json'}
response = Request(self.account.client,
'post',
resource,
headers=headers,
body=json.dumps(params)).perform()
success_count = response.body['data']['success_count']
total_count = response.body['data']['total_count']
return (success_count, total_count) | python | def users(self, params):
"""
This is a private API and requires whitelisting from Twitter.
This endpoint will allow partners to add, update and remove users from a given
tailored_audience_id.
The endpoint will also accept multiple user identifier types per user as well.
"""
resource = self.RESOURCE_USERS.format(account_id=self.account.id, id=self.id)
headers = {'Content-Type': 'application/json'}
response = Request(self.account.client,
'post',
resource,
headers=headers,
body=json.dumps(params)).perform()
success_count = response.body['data']['success_count']
total_count = response.body['data']['total_count']
return (success_count, total_count) | [
"def",
"users",
"(",
"self",
",",
"params",
")",
":",
"resource",
"=",
"self",
".",
"RESOURCE_USERS",
".",
"format",
"(",
"account_id",
"=",
"self",
".",
"account",
".",
"id",
",",
"id",
"=",
"self",
".",
"id",
")",
"headers",
"=",
"{",
"'Content-Typ... | This is a private API and requires whitelisting from Twitter.
This endpoint will allow partners to add, update and remove users from a given
tailored_audience_id.
The endpoint will also accept multiple user identifier types per user as well. | [
"This",
"is",
"a",
"private",
"API",
"and",
"requires",
"whitelisting",
"from",
"Twitter",
".",
"This",
"endpoint",
"will",
"allow",
"partners",
"to",
"add",
"update",
"and",
"remove",
"users",
"from",
"a",
"given",
"tailored_audience_id",
".",
"The",
"endpoin... | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L38-L54 | train | 208,466 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | TailoredAudience.permissions | def permissions(self, **kwargs):
"""
Returns a collection of permissions for the curent tailored audience.
"""
self._validate_loaded()
return TailoredAudiencePermission.all(self.account, self.id, **kwargs) | python | def permissions(self, **kwargs):
"""
Returns a collection of permissions for the curent tailored audience.
"""
self._validate_loaded()
return TailoredAudiencePermission.all(self.account, self.id, **kwargs) | [
"def",
"permissions",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_validate_loaded",
"(",
")",
"return",
"TailoredAudiencePermission",
".",
"all",
"(",
"self",
".",
"account",
",",
"self",
".",
"id",
",",
"*",
"*",
"kwargs",
")"
] | Returns a collection of permissions for the curent tailored audience. | [
"Returns",
"a",
"collection",
"of",
"permissions",
"for",
"the",
"curent",
"tailored",
"audience",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L64-L69 | train | 208,467 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | TailoredAudiencePermission.all | def all(klass, account, tailored_audience_id, **kwargs):
"""Returns a Cursor instance for the given tailored audience permission resource."""
resource = klass.RESOURCE_COLLECTION.format(
account_id=account.id,
tailored_audience_id=tailored_audience_id)
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(klass, request, init_with=[account]) | python | def all(klass, account, tailored_audience_id, **kwargs):
"""Returns a Cursor instance for the given tailored audience permission resource."""
resource = klass.RESOURCE_COLLECTION.format(
account_id=account.id,
tailored_audience_id=tailored_audience_id)
request = Request(account.client, 'get', resource, params=kwargs)
return Cursor(klass, request, init_with=[account]) | [
"def",
"all",
"(",
"klass",
",",
"account",
",",
"tailored_audience_id",
",",
"*",
"*",
"kwargs",
")",
":",
"resource",
"=",
"klass",
".",
"RESOURCE_COLLECTION",
".",
"format",
"(",
"account_id",
"=",
"account",
".",
"id",
",",
"tailored_audience_id",
"=",
... | Returns a Cursor instance for the given tailored audience permission resource. | [
"Returns",
"a",
"Cursor",
"instance",
"for",
"the",
"given",
"tailored",
"audience",
"permission",
"resource",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L106-L114 | train | 208,468 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | TailoredAudiencePermission.save | def save(self):
"""
Saves or updates the current tailored audience permission.
"""
if self.id:
method = 'put'
resource = self.RESOURCE.format(
account_id=self.account.id,
tailored_audience_id=self.tailored_audience_id,
id=self.id)
else:
method = 'post'
resource = self.RESOURCE_COLLECTION.format(
account_id=self.account.id,
tailored_audience_id=self.tailored_audience_id)
response = Request(
self.account.client, method,
resource, params=self.to_params()).perform()
return self.from_response(response.body['data']) | python | def save(self):
"""
Saves or updates the current tailored audience permission.
"""
if self.id:
method = 'put'
resource = self.RESOURCE.format(
account_id=self.account.id,
tailored_audience_id=self.tailored_audience_id,
id=self.id)
else:
method = 'post'
resource = self.RESOURCE_COLLECTION.format(
account_id=self.account.id,
tailored_audience_id=self.tailored_audience_id)
response = Request(
self.account.client, method,
resource, params=self.to_params()).perform()
return self.from_response(response.body['data']) | [
"def",
"save",
"(",
"self",
")",
":",
"if",
"self",
".",
"id",
":",
"method",
"=",
"'put'",
"resource",
"=",
"self",
".",
"RESOURCE",
".",
"format",
"(",
"account_id",
"=",
"self",
".",
"account",
".",
"id",
",",
"tailored_audience_id",
"=",
"self",
... | Saves or updates the current tailored audience permission. | [
"Saves",
"or",
"updates",
"the",
"current",
"tailored",
"audience",
"permission",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L116-L136 | train | 208,469 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | TailoredAudiencePermission.delete | def delete(self):
"""
Deletes the current tailored audience permission.
"""
resource = self.RESOURCE.format(
account_id=self.account.id,
tailored_audience_id=self.tailored_audience_id,
id=self.id)
response = Request(self.account.client, 'delete', resource).perform()
return self.from_response(response.body['data']) | python | def delete(self):
"""
Deletes the current tailored audience permission.
"""
resource = self.RESOURCE.format(
account_id=self.account.id,
tailored_audience_id=self.tailored_audience_id,
id=self.id)
response = Request(self.account.client, 'delete', resource).perform()
return self.from_response(response.body['data']) | [
"def",
"delete",
"(",
"self",
")",
":",
"resource",
"=",
"self",
".",
"RESOURCE",
".",
"format",
"(",
"account_id",
"=",
"self",
".",
"account",
".",
"id",
",",
"tailored_audience_id",
"=",
"self",
".",
"tailored_audience_id",
",",
"id",
"=",
"self",
"."... | Deletes the current tailored audience permission. | [
"Deletes",
"the",
"current",
"tailored",
"audience",
"permission",
"."
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L138-L147 | train | 208,470 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | AudienceIntelligence.__get | def __get(klass, account, client, params):
"""
Helper function to get the conversation data
Returns a Cursor instance
"""
resource = klass.RESOURCE_CONVERSATIONS.format(account_id=account.id)
request = Request(
account.client, klass.METHOD,
resource, headers=klass.HEADERS, body=params)
return Cursor(klass, request, init_with=[account]) | python | def __get(klass, account, client, params):
"""
Helper function to get the conversation data
Returns a Cursor instance
"""
resource = klass.RESOURCE_CONVERSATIONS.format(account_id=account.id)
request = Request(
account.client, klass.METHOD,
resource, headers=klass.HEADERS, body=params)
return Cursor(klass, request, init_with=[account]) | [
"def",
"__get",
"(",
"klass",
",",
"account",
",",
"client",
",",
"params",
")",
":",
"resource",
"=",
"klass",
".",
"RESOURCE_CONVERSATIONS",
".",
"format",
"(",
"account_id",
"=",
"account",
".",
"id",
")",
"request",
"=",
"Request",
"(",
"account",
".... | Helper function to get the conversation data
Returns a Cursor instance | [
"Helper",
"function",
"to",
"get",
"the",
"conversation",
"data",
"Returns",
"a",
"Cursor",
"instance"
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L173-L183 | train | 208,471 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | AudienceIntelligence.conversations | def conversations(self):
"""
Get the conversation topics for an input targeting criteria
"""
body = {
"conversation_type": self.conversation_type,
"audience_definition": self.audience_definition,
"targeting_inputs": self.targeting_inputs
}
return self.__get(account=self.account, client=self.account.client, params=json.dumps(body)) | python | def conversations(self):
"""
Get the conversation topics for an input targeting criteria
"""
body = {
"conversation_type": self.conversation_type,
"audience_definition": self.audience_definition,
"targeting_inputs": self.targeting_inputs
}
return self.__get(account=self.account, client=self.account.client, params=json.dumps(body)) | [
"def",
"conversations",
"(",
"self",
")",
":",
"body",
"=",
"{",
"\"conversation_type\"",
":",
"self",
".",
"conversation_type",
",",
"\"audience_definition\"",
":",
"self",
".",
"audience_definition",
",",
"\"targeting_inputs\"",
":",
"self",
".",
"targeting_inputs... | Get the conversation topics for an input targeting criteria | [
"Get",
"the",
"conversation",
"topics",
"for",
"an",
"input",
"targeting",
"criteria"
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L185-L194 | train | 208,472 |
twitterdev/twitter-python-ads-sdk | twitter_ads/audience.py | AudienceIntelligence.demographics | def demographics(self):
"""
Get the demographic breakdown for an input targeting criteria
"""
body = {
"audience_definition": self.audience_definition,
"targeting_inputs": self.targeting_inputs
}
resource = self.RESOURCE_DEMOGRAPHICS.format(account_id=self.account.id)
response = Request(
self.account.client, self.METHOD,
resource, headers=self.HEADERS, body=json.dumps(body)).perform()
return response.body['data'] | python | def demographics(self):
"""
Get the demographic breakdown for an input targeting criteria
"""
body = {
"audience_definition": self.audience_definition,
"targeting_inputs": self.targeting_inputs
}
resource = self.RESOURCE_DEMOGRAPHICS.format(account_id=self.account.id)
response = Request(
self.account.client, self.METHOD,
resource, headers=self.HEADERS, body=json.dumps(body)).perform()
return response.body['data'] | [
"def",
"demographics",
"(",
"self",
")",
":",
"body",
"=",
"{",
"\"audience_definition\"",
":",
"self",
".",
"audience_definition",
",",
"\"targeting_inputs\"",
":",
"self",
".",
"targeting_inputs",
"}",
"resource",
"=",
"self",
".",
"RESOURCE_DEMOGRAPHICS",
".",
... | Get the demographic breakdown for an input targeting criteria | [
"Get",
"the",
"demographic",
"breakdown",
"for",
"an",
"input",
"targeting",
"criteria"
] | b4488333ac2a794b85b7f16ded71e98b60e51c74 | https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L196-L208 | train | 208,473 |
buildbot/buildbot_travis | buildbot_travis/steps/create_steps.py | ShellCommand.setupEnvironment | def setupEnvironment(self, cmd):
""" Turn all build properties into environment variables """
shell.ShellCommand.setupEnvironment(self, cmd)
env = {}
for k, v in self.build.getProperties().properties.items():
env[str(k)] = str(v[0])
if cmd.args['env'] is None:
cmd.args['env'] = {}
cmd.args['env'].update(env) | python | def setupEnvironment(self, cmd):
""" Turn all build properties into environment variables """
shell.ShellCommand.setupEnvironment(self, cmd)
env = {}
for k, v in self.build.getProperties().properties.items():
env[str(k)] = str(v[0])
if cmd.args['env'] is None:
cmd.args['env'] = {}
cmd.args['env'].update(env) | [
"def",
"setupEnvironment",
"(",
"self",
",",
"cmd",
")",
":",
"shell",
".",
"ShellCommand",
".",
"setupEnvironment",
"(",
"self",
",",
"cmd",
")",
"env",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"build",
".",
"getProperties",
"(",
")",... | Turn all build properties into environment variables | [
"Turn",
"all",
"build",
"properties",
"into",
"environment",
"variables"
] | 350c657b7aabaf5bc6a9fdb55febdd9d8eabd60c | https://github.com/buildbot/buildbot_travis/blob/350c657b7aabaf5bc6a9fdb55febdd9d8eabd60c/buildbot_travis/steps/create_steps.py#L84-L92 | train | 208,474 |
buildbot/buildbot_travis | buildbot_travis/steps/create_steps.py | ShellCommand.updateStats | def updateStats(self, log):
"""
Parse test results out of common test harnesses.
Currently supported are:
* Plone
* Nose
* Trial
* Something mitchell wrote in Java
"""
stdio = log.getText()
total = passed = skipped = fails = warnings = errors = 0
hastests = False
# Plone? That has lines starting "Ran" and "Total". Total is missing if there is only a single layer.
# For this reason, we total ourselves which lets us work even if someone runes 2 batches of plone tests
# from a single target
# Example::
# Ran 24 tests with 0 failures and 0 errors in 0.009 seconds
if not hastests:
outputs = re.findall(
"Ran (?P<count>[\d]+) tests with (?P<fail>[\d]+) failures and (?P<error>[\d]+) errors",
stdio)
for output in outputs:
total += int(output[0])
fails += int(output[1])
errors += int(output[2])
hastests = True
# Twisted
# Example::
# FAILED (errors=5, successes=11)
# PASSED (successes=16)
if not hastests:
for line in stdio.split("\n"):
if line.startswith("FAILED (") or line.startswith("PASSED ("):
hastests = True
line = line[8:][:-1]
stats = line.split(", ")
data = {}
for stat in stats:
k, v = stat.split("=")
data[k] = int(v)
if "successes" not in data:
total = 0
for number in re.findall(
"Ran (?P<count>[\d]+) tests in ", stdio):
total += int(number)
data["successes"] = total - sum(data.values())
# This matches Nose and Django output
# Example::
# Ran 424 tests in 152.927s
# FAILED (failures=1)
# FAILED (errors=3)
if not hastests:
fails += len(re.findall('FAIL:', stdio))
errors += len(
re.findall(
'======================================================================\nERROR:',
stdio))
for number in re.findall("Ran (?P<count>[\d]+)", stdio):
total += int(number)
hastests = True
# We work out passed at the end because most test runners dont tell us
# and we can't distinguish between different test systems easily so we
# might double count.
passed = total - (skipped + fails + errors + warnings)
# Update the step statistics with out shiny new totals
if hastests:
self.setStatistic('total', total)
self.setStatistic('fails', fails)
self.setStatistic('errors', errors)
self.setStatistic('warnings', warnings)
self.setStatistic('skipped', skipped)
self.setStatistic('passed', passed) | python | def updateStats(self, log):
"""
Parse test results out of common test harnesses.
Currently supported are:
* Plone
* Nose
* Trial
* Something mitchell wrote in Java
"""
stdio = log.getText()
total = passed = skipped = fails = warnings = errors = 0
hastests = False
# Plone? That has lines starting "Ran" and "Total". Total is missing if there is only a single layer.
# For this reason, we total ourselves which lets us work even if someone runes 2 batches of plone tests
# from a single target
# Example::
# Ran 24 tests with 0 failures and 0 errors in 0.009 seconds
if not hastests:
outputs = re.findall(
"Ran (?P<count>[\d]+) tests with (?P<fail>[\d]+) failures and (?P<error>[\d]+) errors",
stdio)
for output in outputs:
total += int(output[0])
fails += int(output[1])
errors += int(output[2])
hastests = True
# Twisted
# Example::
# FAILED (errors=5, successes=11)
# PASSED (successes=16)
if not hastests:
for line in stdio.split("\n"):
if line.startswith("FAILED (") or line.startswith("PASSED ("):
hastests = True
line = line[8:][:-1]
stats = line.split(", ")
data = {}
for stat in stats:
k, v = stat.split("=")
data[k] = int(v)
if "successes" not in data:
total = 0
for number in re.findall(
"Ran (?P<count>[\d]+) tests in ", stdio):
total += int(number)
data["successes"] = total - sum(data.values())
# This matches Nose and Django output
# Example::
# Ran 424 tests in 152.927s
# FAILED (failures=1)
# FAILED (errors=3)
if not hastests:
fails += len(re.findall('FAIL:', stdio))
errors += len(
re.findall(
'======================================================================\nERROR:',
stdio))
for number in re.findall("Ran (?P<count>[\d]+)", stdio):
total += int(number)
hastests = True
# We work out passed at the end because most test runners dont tell us
# and we can't distinguish between different test systems easily so we
# might double count.
passed = total - (skipped + fails + errors + warnings)
# Update the step statistics with out shiny new totals
if hastests:
self.setStatistic('total', total)
self.setStatistic('fails', fails)
self.setStatistic('errors', errors)
self.setStatistic('warnings', warnings)
self.setStatistic('skipped', skipped)
self.setStatistic('passed', passed) | [
"def",
"updateStats",
"(",
"self",
",",
"log",
")",
":",
"stdio",
"=",
"log",
".",
"getText",
"(",
")",
"total",
"=",
"passed",
"=",
"skipped",
"=",
"fails",
"=",
"warnings",
"=",
"errors",
"=",
"0",
"hastests",
"=",
"False",
"# Plone? That has lines sta... | Parse test results out of common test harnesses.
Currently supported are:
* Plone
* Nose
* Trial
* Something mitchell wrote in Java | [
"Parse",
"test",
"results",
"out",
"of",
"common",
"test",
"harnesses",
"."
] | 350c657b7aabaf5bc6a9fdb55febdd9d8eabd60c | https://github.com/buildbot/buildbot_travis/blob/350c657b7aabaf5bc6a9fdb55febdd9d8eabd60c/buildbot_travis/steps/create_steps.py#L103-L190 | train | 208,475 |
buildbot/buildbot_travis | buildbot_travis/api.py | Api.saveConfig | def saveConfig(self, request):
"""I save the config, and run check_config, potencially returning errors"""
res = yield self.assertAllowed(request)
if res:
defer.returnValue(res)
request.setHeader('Content-Type', 'application/json')
if self._in_progress:
defer.returnValue(json.dumps({'success': False, 'errors': ['reconfig already in progress']}))
self._in_progress = True
cfg = json.loads(request.content.read())
if cfg != self._cfg:
try:
err = yield self.saveCfg(cfg)
except Exception as e: # noqa
err = [repr(e)]
if err is not None:
self._in_progress = False
yield self.saveCfg(self._cfg)
defer.returnValue(json.dumps({'success': False, 'errors': err}))
yield self.ep.master.reconfig()
defer.returnValue(json.dumps({'success': True})) | python | def saveConfig(self, request):
"""I save the config, and run check_config, potencially returning errors"""
res = yield self.assertAllowed(request)
if res:
defer.returnValue(res)
request.setHeader('Content-Type', 'application/json')
if self._in_progress:
defer.returnValue(json.dumps({'success': False, 'errors': ['reconfig already in progress']}))
self._in_progress = True
cfg = json.loads(request.content.read())
if cfg != self._cfg:
try:
err = yield self.saveCfg(cfg)
except Exception as e: # noqa
err = [repr(e)]
if err is not None:
self._in_progress = False
yield self.saveCfg(self._cfg)
defer.returnValue(json.dumps({'success': False, 'errors': err}))
yield self.ep.master.reconfig()
defer.returnValue(json.dumps({'success': True})) | [
"def",
"saveConfig",
"(",
"self",
",",
"request",
")",
":",
"res",
"=",
"yield",
"self",
".",
"assertAllowed",
"(",
"request",
")",
"if",
"res",
":",
"defer",
".",
"returnValue",
"(",
"res",
")",
"request",
".",
"setHeader",
"(",
"'Content-Type'",
",",
... | I save the config, and run check_config, potencially returning errors | [
"I",
"save",
"the",
"config",
"and",
"run",
"check_config",
"potencially",
"returning",
"errors"
] | 350c657b7aabaf5bc6a9fdb55febdd9d8eabd60c | https://github.com/buildbot/buildbot_travis/blob/350c657b7aabaf5bc6a9fdb55febdd9d8eabd60c/buildbot_travis/api.py#L96-L117 | train | 208,476 |
Mindwerks/worldengine | worldengine/common.py | anti_alias | def anti_alias(map_in, steps):
"""
Execute the anti_alias operation steps times on the given map
"""
height, width = map_in.shape
map_part = (2.0/11.0)*map_in
# notice how [-1/sqrt(3), -1/sqrt(3), -1/sqrt(3)] * [-1/sqrt(3), -1/sqrt(3), -1/sqrt(3)]^T
# equals [[1/3, 1/3, 1/3], [1/3, 1/3, 1/3], [1/3, 1/3, 1/3]]
# multiply that by (3/11) and we have the 2d kernel from the example above
# therefore the kernel is seperable
w = -1.0/numpy.sqrt(3.0)
kernel = [w, w, w]
def _anti_alias_step(original):
# cf. comments above fo the factor
# this also makes a copy which might actually be superfluous
result = original * (3.0/11.0)
# we need to handle boundary conditions by hand, unfortunately
# there might be a better way but this works (circular boundary)
# notice how we'll need to add 2 to width and height later
# because of this
result = numpy.append(result, [result[0,:]], 0)
result = numpy.append(result, numpy.transpose([result[:,0]]), 1)
result = numpy.insert(result, [0], [result[-2,:]],0)
result = numpy.insert(result, [0], numpy.transpose([result[:,-2]]), 1)
# with a seperable kernel we can convolve the rows first ...
for y in range(height+2):
result[y,1:-1] = numpy.convolve(result[y,:], kernel, 'valid')
# ... and then the columns
for x in range(width+2):
result[1:-1,x] = numpy.convolve(result[:,x], kernel, 'valid')
# throw away invalid values at the boundary
result = result[1:-1,1:-1]
result += map_part
return result
current = map_in
for i in range(steps):
current = _anti_alias_step(current)
return current | python | def anti_alias(map_in, steps):
"""
Execute the anti_alias operation steps times on the given map
"""
height, width = map_in.shape
map_part = (2.0/11.0)*map_in
# notice how [-1/sqrt(3), -1/sqrt(3), -1/sqrt(3)] * [-1/sqrt(3), -1/sqrt(3), -1/sqrt(3)]^T
# equals [[1/3, 1/3, 1/3], [1/3, 1/3, 1/3], [1/3, 1/3, 1/3]]
# multiply that by (3/11) and we have the 2d kernel from the example above
# therefore the kernel is seperable
w = -1.0/numpy.sqrt(3.0)
kernel = [w, w, w]
def _anti_alias_step(original):
# cf. comments above fo the factor
# this also makes a copy which might actually be superfluous
result = original * (3.0/11.0)
# we need to handle boundary conditions by hand, unfortunately
# there might be a better way but this works (circular boundary)
# notice how we'll need to add 2 to width and height later
# because of this
result = numpy.append(result, [result[0,:]], 0)
result = numpy.append(result, numpy.transpose([result[:,0]]), 1)
result = numpy.insert(result, [0], [result[-2,:]],0)
result = numpy.insert(result, [0], numpy.transpose([result[:,-2]]), 1)
# with a seperable kernel we can convolve the rows first ...
for y in range(height+2):
result[y,1:-1] = numpy.convolve(result[y,:], kernel, 'valid')
# ... and then the columns
for x in range(width+2):
result[1:-1,x] = numpy.convolve(result[:,x], kernel, 'valid')
# throw away invalid values at the boundary
result = result[1:-1,1:-1]
result += map_part
return result
current = map_in
for i in range(steps):
current = _anti_alias_step(current)
return current | [
"def",
"anti_alias",
"(",
"map_in",
",",
"steps",
")",
":",
"height",
",",
"width",
"=",
"map_in",
".",
"shape",
"map_part",
"=",
"(",
"2.0",
"/",
"11.0",
")",
"*",
"map_in",
"# notice how [-1/sqrt(3), -1/sqrt(3), -1/sqrt(3)] * [-1/sqrt(3), -1/sqrt(3), -1/sqrt(3)]^T",... | Execute the anti_alias operation steps times on the given map | [
"Execute",
"the",
"anti_alias",
"operation",
"steps",
"times",
"on",
"the",
"given",
"map"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/common.py#L83-L134 | train | 208,477 |
Mindwerks/worldengine | worldengine/common.py | count_neighbours | def count_neighbours(mask, radius=1):
'''Count how many neighbours of a coordinate are set to one.
This uses the same principles as anti_alias, compare comments there.'''
height, width = mask.shape
f = 2.0*radius+1.0
w = -1.0/numpy.sqrt(f)
kernel = [w]*radius + [w] + [w]*radius
result = mask * f
for y in range(height):
result[y,:] = numpy.convolve(result[y,:], kernel, 'same')
for x in range(width):
result[:,x] = numpy.convolve(result[:, x], kernel, 'same')
return result - mask | python | def count_neighbours(mask, radius=1):
'''Count how many neighbours of a coordinate are set to one.
This uses the same principles as anti_alias, compare comments there.'''
height, width = mask.shape
f = 2.0*radius+1.0
w = -1.0/numpy.sqrt(f)
kernel = [w]*radius + [w] + [w]*radius
result = mask * f
for y in range(height):
result[y,:] = numpy.convolve(result[y,:], kernel, 'same')
for x in range(width):
result[:,x] = numpy.convolve(result[:, x], kernel, 'same')
return result - mask | [
"def",
"count_neighbours",
"(",
"mask",
",",
"radius",
"=",
"1",
")",
":",
"height",
",",
"width",
"=",
"mask",
".",
"shape",
"f",
"=",
"2.0",
"*",
"radius",
"+",
"1.0",
"w",
"=",
"-",
"1.0",
"/",
"numpy",
".",
"sqrt",
"(",
"f",
")",
"kernel",
... | Count how many neighbours of a coordinate are set to one.
This uses the same principles as anti_alias, compare comments there. | [
"Count",
"how",
"many",
"neighbours",
"of",
"a",
"coordinate",
"are",
"set",
"to",
"one",
".",
"This",
"uses",
"the",
"same",
"principles",
"as",
"anti_alias",
"compare",
"comments",
"there",
"."
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/common.py#L136-L155 | train | 208,478 |
Mindwerks/worldengine | worldengine/draw.py | average_colors | def average_colors(c1, c2):
''' Average the values of two colors together '''
r = int((c1[0] + c2[0])/2)
g = int((c1[1] + c2[1])/2)
b = int((c1[2] + c2[2])/2)
return (r, g, b) | python | def average_colors(c1, c2):
''' Average the values of two colors together '''
r = int((c1[0] + c2[0])/2)
g = int((c1[1] + c2[1])/2)
b = int((c1[2] + c2[2])/2)
return (r, g, b) | [
"def",
"average_colors",
"(",
"c1",
",",
"c2",
")",
":",
"r",
"=",
"int",
"(",
"(",
"c1",
"[",
"0",
"]",
"+",
"c2",
"[",
"0",
"]",
")",
"/",
"2",
")",
"g",
"=",
"int",
"(",
"(",
"c1",
"[",
"1",
"]",
"+",
"c2",
"[",
"1",
"]",
")",
"/",... | Average the values of two colors together | [
"Average",
"the",
"values",
"of",
"two",
"colors",
"together"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/draw.py#L225-L231 | train | 208,479 |
Mindwerks/worldengine | worldengine/draw.py | get_normalized_elevation_array | def get_normalized_elevation_array(world):
''' Convert raw elevation into normalized values between 0 and 255,
and return a numpy array of these values '''
e = world.layers['elevation'].data
ocean = world.layers['ocean'].data
mask = numpy.ma.array(e, mask=ocean) # only land
min_elev_land = mask.min()
max_elev_land = mask.max()
elev_delta_land = max_elev_land - min_elev_land
mask = numpy.ma.array(e, mask=numpy.logical_not(ocean)) # only ocean
min_elev_sea = mask.min()
max_elev_sea = mask.max()
elev_delta_sea = max_elev_sea - min_elev_sea
c = numpy.empty(e.shape, dtype=numpy.float)
c[numpy.invert(ocean)] = (e[numpy.invert(ocean)] - min_elev_land) * 127 / elev_delta_land + 128
c[ocean] = (e[ocean] - min_elev_sea) * 127 / elev_delta_sea
c = numpy.rint(c).astype(dtype=numpy.int32) # proper rounding
return c | python | def get_normalized_elevation_array(world):
''' Convert raw elevation into normalized values between 0 and 255,
and return a numpy array of these values '''
e = world.layers['elevation'].data
ocean = world.layers['ocean'].data
mask = numpy.ma.array(e, mask=ocean) # only land
min_elev_land = mask.min()
max_elev_land = mask.max()
elev_delta_land = max_elev_land - min_elev_land
mask = numpy.ma.array(e, mask=numpy.logical_not(ocean)) # only ocean
min_elev_sea = mask.min()
max_elev_sea = mask.max()
elev_delta_sea = max_elev_sea - min_elev_sea
c = numpy.empty(e.shape, dtype=numpy.float)
c[numpy.invert(ocean)] = (e[numpy.invert(ocean)] - min_elev_land) * 127 / elev_delta_land + 128
c[ocean] = (e[ocean] - min_elev_sea) * 127 / elev_delta_sea
c = numpy.rint(c).astype(dtype=numpy.int32) # proper rounding
return c | [
"def",
"get_normalized_elevation_array",
"(",
"world",
")",
":",
"e",
"=",
"world",
".",
"layers",
"[",
"'elevation'",
"]",
".",
"data",
"ocean",
"=",
"world",
".",
"layers",
"[",
"'ocean'",
"]",
".",
"data",
"mask",
"=",
"numpy",
".",
"ma",
".",
"arra... | Convert raw elevation into normalized values between 0 and 255,
and return a numpy array of these values | [
"Convert",
"raw",
"elevation",
"into",
"normalized",
"values",
"between",
"0",
"and",
"255",
"and",
"return",
"a",
"numpy",
"array",
"of",
"these",
"values"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/draw.py#L234-L256 | train | 208,480 |
Mindwerks/worldengine | worldengine/draw.py | get_biome_color_based_on_elevation | def get_biome_color_based_on_elevation(world, elev, x, y, rng):
''' This is the "business logic" for determining the base biome color in satellite view.
This includes generating some "noise" at each spot in a pixel's rgb value, potentially
modifying the noise based on elevation, and finally incorporating this with the base biome color.
The basic rules regarding noise generation are:
- Oceans have no noise added
- land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE) for each rgb value
- land tiles with high elevations further modify the noise by set amounts (to drain some of the
color and make the map look more like mountains)
The biome's base color may be interpolated with a predefined mountain brown color if the elevation is high enough.
Finally, the noise plus the biome color are added and returned.
rng refers to an instance of a random number generator used to draw the random samples needed by this function.
'''
v = world.biome_at((x, y)).name()
biome_color = _biome_satellite_colors[v]
# Default is no noise - will be overwritten if this tile is land
noise = (0, 0, 0)
if world.is_land((x, y)):
## Generate some random noise to apply to this pixel
# There is noise for each element of the rgb value
# This noise will be further modified by the height of this tile
noise = rng.randint(-NOISE_RANGE, NOISE_RANGE, size=3) # draw three random numbers at once
####### Case 1 - elevation is very high ########
if elev > HIGH_MOUNTAIN_ELEV:
# Modify the noise to make the area slightly brighter to simulate snow-topped mountains.
noise = add_colors(noise, HIGH_MOUNTAIN_NOISE_MODIFIER)
# Average the biome's color with the MOUNTAIN_COLOR to tint the terrain
biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
####### Case 2 - elevation is high ########
elif elev > MOUNTAIN_ELEV:
# Modify the noise to make this tile slightly darker, especially draining the green
noise = add_colors(noise, MOUNTAIN_NOISE_MODIFIER)
# Average the biome's color with the MOUNTAIN_COLOR to tint the terrain
biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
####### Case 3 - elevation is somewhat high ########
elif elev > HIGH_HILL_ELEV:
noise = add_colors(noise, HIGH_HILL_NOISE_MODIFIER)
####### Case 4 - elevation is a little bit high ########
elif elev > HILL_ELEV:
noise = add_colors(noise, HILL_NOISE_MODIFIER)
# There is also a minor base modifier to the pixel's rgb value based on height
modification_amount = int(elev / BASE_ELEVATION_INTENSITY_MODIFIER)
base_elevation_modifier = (modification_amount, modification_amount, modification_amount)
this_tile_color = add_colors(biome_color, noise, base_elevation_modifier)
return this_tile_color | python | def get_biome_color_based_on_elevation(world, elev, x, y, rng):
''' This is the "business logic" for determining the base biome color in satellite view.
This includes generating some "noise" at each spot in a pixel's rgb value, potentially
modifying the noise based on elevation, and finally incorporating this with the base biome color.
The basic rules regarding noise generation are:
- Oceans have no noise added
- land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE) for each rgb value
- land tiles with high elevations further modify the noise by set amounts (to drain some of the
color and make the map look more like mountains)
The biome's base color may be interpolated with a predefined mountain brown color if the elevation is high enough.
Finally, the noise plus the biome color are added and returned.
rng refers to an instance of a random number generator used to draw the random samples needed by this function.
'''
v = world.biome_at((x, y)).name()
biome_color = _biome_satellite_colors[v]
# Default is no noise - will be overwritten if this tile is land
noise = (0, 0, 0)
if world.is_land((x, y)):
## Generate some random noise to apply to this pixel
# There is noise for each element of the rgb value
# This noise will be further modified by the height of this tile
noise = rng.randint(-NOISE_RANGE, NOISE_RANGE, size=3) # draw three random numbers at once
####### Case 1 - elevation is very high ########
if elev > HIGH_MOUNTAIN_ELEV:
# Modify the noise to make the area slightly brighter to simulate snow-topped mountains.
noise = add_colors(noise, HIGH_MOUNTAIN_NOISE_MODIFIER)
# Average the biome's color with the MOUNTAIN_COLOR to tint the terrain
biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
####### Case 2 - elevation is high ########
elif elev > MOUNTAIN_ELEV:
# Modify the noise to make this tile slightly darker, especially draining the green
noise = add_colors(noise, MOUNTAIN_NOISE_MODIFIER)
# Average the biome's color with the MOUNTAIN_COLOR to tint the terrain
biome_color = average_colors(biome_color, MOUNTAIN_COLOR)
####### Case 3 - elevation is somewhat high ########
elif elev > HIGH_HILL_ELEV:
noise = add_colors(noise, HIGH_HILL_NOISE_MODIFIER)
####### Case 4 - elevation is a little bit high ########
elif elev > HILL_ELEV:
noise = add_colors(noise, HILL_NOISE_MODIFIER)
# There is also a minor base modifier to the pixel's rgb value based on height
modification_amount = int(elev / BASE_ELEVATION_INTENSITY_MODIFIER)
base_elevation_modifier = (modification_amount, modification_amount, modification_amount)
this_tile_color = add_colors(biome_color, noise, base_elevation_modifier)
return this_tile_color | [
"def",
"get_biome_color_based_on_elevation",
"(",
"world",
",",
"elev",
",",
"x",
",",
"y",
",",
"rng",
")",
":",
"v",
"=",
"world",
".",
"biome_at",
"(",
"(",
"x",
",",
"y",
")",
")",
".",
"name",
"(",
")",
"biome_color",
"=",
"_biome_satellite_colors... | This is the "business logic" for determining the base biome color in satellite view.
This includes generating some "noise" at each spot in a pixel's rgb value, potentially
modifying the noise based on elevation, and finally incorporating this with the base biome color.
The basic rules regarding noise generation are:
- Oceans have no noise added
- land tiles start with noise somewhere inside (-NOISE_RANGE, NOISE_RANGE) for each rgb value
- land tiles with high elevations further modify the noise by set amounts (to drain some of the
color and make the map look more like mountains)
The biome's base color may be interpolated with a predefined mountain brown color if the elevation is high enough.
Finally, the noise plus the biome color are added and returned.
rng refers to an instance of a random number generator used to draw the random samples needed by this function. | [
"This",
"is",
"the",
"business",
"logic",
"for",
"determining",
"the",
"base",
"biome",
"color",
"in",
"satellite",
"view",
".",
"This",
"includes",
"generating",
"some",
"noise",
"at",
"each",
"spot",
"in",
"a",
"pixel",
"s",
"rgb",
"value",
"potentially",
... | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/draw.py#L259-L316 | train | 208,481 |
Mindwerks/worldengine | worldengine/simulations/erosion.py | ErosionSimulation.find_water_flow | def find_water_flow(self, world, water_path):
"""Find the flow direction for each cell in heightmap"""
# iterate through each cell
for x in range(world.width - 1):
for y in range(world.height - 1):
# search around cell for a direction
path = self.find_quick_path([x, y], world)
if path:
tx, ty = path
flow_dir = [tx - x, ty - y]
key = 0
for direction in DIR_NEIGHBORS_CENTER:
if direction == flow_dir:
water_path[y, x] = key
key += 1 | python | def find_water_flow(self, world, water_path):
"""Find the flow direction for each cell in heightmap"""
# iterate through each cell
for x in range(world.width - 1):
for y in range(world.height - 1):
# search around cell for a direction
path = self.find_quick_path([x, y], world)
if path:
tx, ty = path
flow_dir = [tx - x, ty - y]
key = 0
for direction in DIR_NEIGHBORS_CENTER:
if direction == flow_dir:
water_path[y, x] = key
key += 1 | [
"def",
"find_water_flow",
"(",
"self",
",",
"world",
",",
"water_path",
")",
":",
"# iterate through each cell",
"for",
"x",
"in",
"range",
"(",
"world",
".",
"width",
"-",
"1",
")",
":",
"for",
"y",
"in",
"range",
"(",
"world",
".",
"height",
"-",
"1"... | Find the flow direction for each cell in heightmap | [
"Find",
"the",
"flow",
"direction",
"for",
"each",
"cell",
"in",
"heightmap"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/simulations/erosion.py#L76-L91 | train | 208,482 |
Mindwerks/worldengine | worldengine/simulations/erosion.py | ErosionSimulation.river_sources | def river_sources(world, water_flow, water_path):
"""Find places on map where sources of river can be found"""
river_source_list = []
# Using the wind and rainfall data, create river 'seeds' by
# flowing rainfall along paths until a 'flow' threshold is reached
# and we have a beginning of a river... trickle->stream->river->sea
# step one: Using flow direction, follow the path for each cell
# adding the previous cell's flow to the current cell's flow.
# step two: We loop through the water flow map looking for cells
# above the water flow threshold. These are our river sources and
# we mark them as rivers. While looking, the cells with no
# out-going flow, above water flow threshold and are still
# above sea level are marked as 'sources'.
for y in range(0, world.height - 1):
for x in range(0, world.width - 1):
rain_fall = world.layers['precipitation'].data[y, x]
water_flow[y, x] = rain_fall
if water_path[y, x] == 0:
continue # ignore cells without flow direction
cx, cy = x, y # begin with starting location
neighbour_seed_found = False
# follow flow path to where it may lead
while not neighbour_seed_found:
# have we found a seed?
if world.is_mountain((cx, cy)) and water_flow[cy, cx] >= RIVER_TH:
# try not to create seeds around other seeds
for seed in river_source_list:
sx, sy = seed
if in_circle(9, cx, cy, sx, sy):
neighbour_seed_found = True
if neighbour_seed_found:
break # we do not want seeds for neighbors
river_source_list.append([cx, cy]) # river seed
break
# no path means dead end...
if water_path[cy, cx] == 0:
break # break out of loop
# follow path, add water flow from previous cell
dx, dy = DIR_NEIGHBORS_CENTER[water_path[cy, cx]]
nx, ny = cx + dx, cy + dy # calculate next cell
water_flow[ny, nx] += rain_fall
cx, cy = nx, ny # set current cell to next cell
return river_source_list | python | def river_sources(world, water_flow, water_path):
"""Find places on map where sources of river can be found"""
river_source_list = []
# Using the wind and rainfall data, create river 'seeds' by
# flowing rainfall along paths until a 'flow' threshold is reached
# and we have a beginning of a river... trickle->stream->river->sea
# step one: Using flow direction, follow the path for each cell
# adding the previous cell's flow to the current cell's flow.
# step two: We loop through the water flow map looking for cells
# above the water flow threshold. These are our river sources and
# we mark them as rivers. While looking, the cells with no
# out-going flow, above water flow threshold and are still
# above sea level are marked as 'sources'.
for y in range(0, world.height - 1):
for x in range(0, world.width - 1):
rain_fall = world.layers['precipitation'].data[y, x]
water_flow[y, x] = rain_fall
if water_path[y, x] == 0:
continue # ignore cells without flow direction
cx, cy = x, y # begin with starting location
neighbour_seed_found = False
# follow flow path to where it may lead
while not neighbour_seed_found:
# have we found a seed?
if world.is_mountain((cx, cy)) and water_flow[cy, cx] >= RIVER_TH:
# try not to create seeds around other seeds
for seed in river_source_list:
sx, sy = seed
if in_circle(9, cx, cy, sx, sy):
neighbour_seed_found = True
if neighbour_seed_found:
break # we do not want seeds for neighbors
river_source_list.append([cx, cy]) # river seed
break
# no path means dead end...
if water_path[cy, cx] == 0:
break # break out of loop
# follow path, add water flow from previous cell
dx, dy = DIR_NEIGHBORS_CENTER[water_path[cy, cx]]
nx, ny = cx + dx, cy + dy # calculate next cell
water_flow[ny, nx] += rain_fall
cx, cy = nx, ny # set current cell to next cell
return river_source_list | [
"def",
"river_sources",
"(",
"world",
",",
"water_flow",
",",
"water_path",
")",
":",
"river_source_list",
"=",
"[",
"]",
"# Using the wind and rainfall data, create river 'seeds' by",
"# flowing rainfall along paths until a 'flow' threshold is reached",
"# and we have a begi... | Find places on map where sources of river can be found | [
"Find",
"places",
"on",
"map",
"where",
"sources",
"of",
"river",
"can",
"be",
"found"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/simulations/erosion.py#L126-L176 | train | 208,483 |
Mindwerks/worldengine | worldengine/simulations/erosion.py | ErosionSimulation.cleanUpFlow | def cleanUpFlow(self, river, world):
'''Validate that for each point in river is equal to or lower than the
last'''
celevation = 1.0
for r in river:
rx, ry = r
relevation = world.layers['elevation'].data[ry, rx]
if relevation <= celevation:
celevation = relevation
elif relevation > celevation:
world.layers['elevation'].data[ry, rx] = celevation
return river | python | def cleanUpFlow(self, river, world):
'''Validate that for each point in river is equal to or lower than the
last'''
celevation = 1.0
for r in river:
rx, ry = r
relevation = world.layers['elevation'].data[ry, rx]
if relevation <= celevation:
celevation = relevation
elif relevation > celevation:
world.layers['elevation'].data[ry, rx] = celevation
return river | [
"def",
"cleanUpFlow",
"(",
"self",
",",
"river",
",",
"world",
")",
":",
"celevation",
"=",
"1.0",
"for",
"r",
"in",
"river",
":",
"rx",
",",
"ry",
"=",
"r",
"relevation",
"=",
"world",
".",
"layers",
"[",
"'elevation'",
"]",
".",
"data",
"[",
"ry"... | Validate that for each point in river is equal to or lower than the
last | [
"Validate",
"that",
"for",
"each",
"point",
"in",
"river",
"is",
"equal",
"to",
"or",
"lower",
"than",
"the",
"last"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/simulations/erosion.py#L289-L300 | train | 208,484 |
Mindwerks/worldengine | worldengine/simulations/erosion.py | ErosionSimulation.findLowerElevation | def findLowerElevation(self, source, world):
'''Try to find a lower elevation with in a range of an increasing
circle's radius and try to find the best path and return it'''
x, y = source
currentRadius = 1
maxRadius = 40
lowestElevation = world.layers['elevation'].data[y, x]
destination = []
notFound = True
isWrapped = False
wrapped = []
while notFound and currentRadius <= maxRadius:
for cx in range(-currentRadius, currentRadius + 1):
for cy in range(-currentRadius, currentRadius + 1):
rx, ry = x + cx, y + cy
# are we within bounds?
if not self.wrap and not world.contains((rx, ry)):
continue
# are we within a circle?
if not in_circle(currentRadius, x, y, rx, ry):
continue
rx, ry = overflow(rx, world.width), overflow(ry,
world.height)
# if utilities.outOfBounds([x+cx, y+cy], self.size):
# print "Fixed:",x ,y, rx, ry
elevation = world.layers['elevation'].data[ry, rx]
# have we found a lower elevation?
if elevation < lowestElevation:
lowestElevation = elevation
destination = [rx, ry]
notFound = False
if not world.contains((x + cx, y + cy)):
wrapped.append(destination)
currentRadius += 1
if destination in wrapped:
isWrapped = True
# print "Wrapped lower elevation found:", rx, ry, "!"
return isWrapped, destination | python | def findLowerElevation(self, source, world):
'''Try to find a lower elevation with in a range of an increasing
circle's radius and try to find the best path and return it'''
x, y = source
currentRadius = 1
maxRadius = 40
lowestElevation = world.layers['elevation'].data[y, x]
destination = []
notFound = True
isWrapped = False
wrapped = []
while notFound and currentRadius <= maxRadius:
for cx in range(-currentRadius, currentRadius + 1):
for cy in range(-currentRadius, currentRadius + 1):
rx, ry = x + cx, y + cy
# are we within bounds?
if not self.wrap and not world.contains((rx, ry)):
continue
# are we within a circle?
if not in_circle(currentRadius, x, y, rx, ry):
continue
rx, ry = overflow(rx, world.width), overflow(ry,
world.height)
# if utilities.outOfBounds([x+cx, y+cy], self.size):
# print "Fixed:",x ,y, rx, ry
elevation = world.layers['elevation'].data[ry, rx]
# have we found a lower elevation?
if elevation < lowestElevation:
lowestElevation = elevation
destination = [rx, ry]
notFound = False
if not world.contains((x + cx, y + cy)):
wrapped.append(destination)
currentRadius += 1
if destination in wrapped:
isWrapped = True
# print "Wrapped lower elevation found:", rx, ry, "!"
return isWrapped, destination | [
"def",
"findLowerElevation",
"(",
"self",
",",
"source",
",",
"world",
")",
":",
"x",
",",
"y",
"=",
"source",
"currentRadius",
"=",
"1",
"maxRadius",
"=",
"40",
"lowestElevation",
"=",
"world",
".",
"layers",
"[",
"'elevation'",
"]",
".",
"data",
"[",
... | Try to find a lower elevation with in a range of an increasing
circle's radius and try to find the best path and return it | [
"Try",
"to",
"find",
"a",
"lower",
"elevation",
"with",
"in",
"a",
"range",
"of",
"an",
"increasing",
"circle",
"s",
"radius",
"and",
"try",
"to",
"find",
"the",
"best",
"path",
"and",
"return",
"it"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/simulations/erosion.py#L302-L347 | train | 208,485 |
Mindwerks/worldengine | worldengine/simulations/erosion.py | ErosionSimulation.rivermap_update | def rivermap_update(self, river, water_flow, rivermap, precipitations):
"""Update the rivermap with the rainfall that is to become
the waterflow"""
isSeed = True
px, py = (0, 0)
for x, y in river:
if isSeed:
rivermap[y, x] = water_flow[y, x]
isSeed = False
else:
rivermap[y, x] = precipitations[y, x] + rivermap[py, px]
px, py = x, y | python | def rivermap_update(self, river, water_flow, rivermap, precipitations):
"""Update the rivermap with the rainfall that is to become
the waterflow"""
isSeed = True
px, py = (0, 0)
for x, y in river:
if isSeed:
rivermap[y, x] = water_flow[y, x]
isSeed = False
else:
rivermap[y, x] = precipitations[y, x] + rivermap[py, px]
px, py = x, y | [
"def",
"rivermap_update",
"(",
"self",
",",
"river",
",",
"water_flow",
",",
"rivermap",
",",
"precipitations",
")",
":",
"isSeed",
"=",
"True",
"px",
",",
"py",
"=",
"(",
"0",
",",
"0",
")",
"for",
"x",
",",
"y",
"in",
"river",
":",
"if",
"isSeed"... | Update the rivermap with the rainfall that is to become
the waterflow | [
"Update",
"the",
"rivermap",
"with",
"the",
"rainfall",
"that",
"is",
"to",
"become",
"the",
"waterflow"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/simulations/erosion.py#L393-L405 | train | 208,486 |
Mindwerks/worldengine | worldengine/drawing_functions.py | draw_rivers_on_image | def draw_rivers_on_image(world, target, factor=1):
"""Draw only the rivers, it expect the background to be in place
"""
for y in range(world.height):
for x in range(world.width):
if world.is_land((x, y)) and (world.layers['river_map'].data[y, x] > 0.0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 0, 128, 255))
if world.is_land((x, y)) and (world.layers['lake_map'].data[y, x] != 0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 100, 128, 255)) | python | def draw_rivers_on_image(world, target, factor=1):
"""Draw only the rivers, it expect the background to be in place
"""
for y in range(world.height):
for x in range(world.width):
if world.is_land((x, y)) and (world.layers['river_map'].data[y, x] > 0.0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 0, 128, 255))
if world.is_land((x, y)) and (world.layers['lake_map'].data[y, x] != 0):
for dx in range(factor):
for dy in range(factor):
target.set_pixel(x * factor + dx, y * factor + dy, (0, 100, 128, 255)) | [
"def",
"draw_rivers_on_image",
"(",
"world",
",",
"target",
",",
"factor",
"=",
"1",
")",
":",
"for",
"y",
"in",
"range",
"(",
"world",
".",
"height",
")",
":",
"for",
"x",
"in",
"range",
"(",
"world",
".",
"width",
")",
":",
"if",
"world",
".",
... | Draw only the rivers, it expect the background to be in place | [
"Draw",
"only",
"the",
"rivers",
"it",
"expect",
"the",
"background",
"to",
"be",
"in",
"place"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/drawing_functions.py#L39-L52 | train | 208,487 |
Mindwerks/worldengine | worldengine/generation.py | center_land | def center_land(world):
"""Translate the map horizontally and vertically to put as much ocean as
possible at the borders. It operates on elevation and plates map"""
y_sums = world.layers['elevation'].data.sum(1) # 1 == sum along x-axis
y_with_min_sum = y_sums.argmin()
if get_verbose():
print("geo.center_land: height complete")
x_sums = world.layers['elevation'].data.sum(0) # 0 == sum along y-axis
x_with_min_sum = x_sums.argmin()
if get_verbose():
print("geo.center_land: width complete")
latshift = 0
world.layers['elevation'].data = numpy.roll(numpy.roll(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
world.layers['plates'].data = numpy.roll(numpy.roll(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
if get_verbose():
print("geo.center_land: width complete") | python | def center_land(world):
"""Translate the map horizontally and vertically to put as much ocean as
possible at the borders. It operates on elevation and plates map"""
y_sums = world.layers['elevation'].data.sum(1) # 1 == sum along x-axis
y_with_min_sum = y_sums.argmin()
if get_verbose():
print("geo.center_land: height complete")
x_sums = world.layers['elevation'].data.sum(0) # 0 == sum along y-axis
x_with_min_sum = x_sums.argmin()
if get_verbose():
print("geo.center_land: width complete")
latshift = 0
world.layers['elevation'].data = numpy.roll(numpy.roll(world.layers['elevation'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
world.layers['plates'].data = numpy.roll(numpy.roll(world.layers['plates'].data, -y_with_min_sum + latshift, axis=0), - x_with_min_sum, axis=1)
if get_verbose():
print("geo.center_land: width complete") | [
"def",
"center_land",
"(",
"world",
")",
":",
"y_sums",
"=",
"world",
".",
"layers",
"[",
"'elevation'",
"]",
".",
"data",
".",
"sum",
"(",
"1",
")",
"# 1 == sum along x-axis",
"y_with_min_sum",
"=",
"y_sums",
".",
"argmin",
"(",
")",
"if",
"get_verbose",
... | Translate the map horizontally and vertically to put as much ocean as
possible at the borders. It operates on elevation and plates map | [
"Translate",
"the",
"map",
"horizontally",
"and",
"vertically",
"to",
"put",
"as",
"much",
"ocean",
"as",
"possible",
"at",
"the",
"borders",
".",
"It",
"operates",
"on",
"elevation",
"and",
"plates",
"map"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/generation.py#L23-L41 | train | 208,488 |
Mindwerks/worldengine | worldengine/generation.py | place_oceans_at_map_borders | def place_oceans_at_map_borders(world):
"""
Lower the elevation near the border of the map
"""
ocean_border = int(min(30, max(world.width / 5, world.height / 5)))
def place_ocean(x, y, i):
world.layers['elevation'].data[y, x] = \
(world.layers['elevation'].data[y, x] * i) / ocean_border
for x in range(world.width):
for i in range(ocean_border):
place_ocean(x, i, i)
place_ocean(x, world.height - i - 1, i)
for y in range(world.height):
for i in range(ocean_border):
place_ocean(i, y, i)
place_ocean(world.width - i - 1, y, i) | python | def place_oceans_at_map_borders(world):
"""
Lower the elevation near the border of the map
"""
ocean_border = int(min(30, max(world.width / 5, world.height / 5)))
def place_ocean(x, y, i):
world.layers['elevation'].data[y, x] = \
(world.layers['elevation'].data[y, x] * i) / ocean_border
for x in range(world.width):
for i in range(ocean_border):
place_ocean(x, i, i)
place_ocean(x, world.height - i - 1, i)
for y in range(world.height):
for i in range(ocean_border):
place_ocean(i, y, i)
place_ocean(world.width - i - 1, y, i) | [
"def",
"place_oceans_at_map_borders",
"(",
"world",
")",
":",
"ocean_border",
"=",
"int",
"(",
"min",
"(",
"30",
",",
"max",
"(",
"world",
".",
"width",
"/",
"5",
",",
"world",
".",
"height",
"/",
"5",
")",
")",
")",
"def",
"place_ocean",
"(",
"x",
... | Lower the elevation near the border of the map | [
"Lower",
"the",
"elevation",
"near",
"the",
"border",
"of",
"the",
"map"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/generation.py#L44-L63 | train | 208,489 |
Mindwerks/worldengine | worldengine/generation.py | harmonize_ocean | def harmonize_ocean(ocean, elevation, ocean_level):
"""
The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform
"""
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0
ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)
deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0) | python | def harmonize_ocean(ocean, elevation, ocean_level):
"""
The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform
"""
shallow_sea = ocean_level * 0.85
midpoint = shallow_sea / 2.0
ocean_points = numpy.logical_and(elevation < shallow_sea, ocean)
shallow_ocean = numpy.logical_and(elevation < midpoint, ocean_points)
elevation[shallow_ocean] = midpoint - ((midpoint - elevation[shallow_ocean]) / 5.0)
deep_ocean = numpy.logical_and(elevation > midpoint, ocean_points)
elevation[deep_ocean] = midpoint + ((elevation[deep_ocean] - midpoint) / 5.0) | [
"def",
"harmonize_ocean",
"(",
"ocean",
",",
"elevation",
",",
"ocean_level",
")",
":",
"shallow_sea",
"=",
"ocean_level",
"*",
"0.85",
"midpoint",
"=",
"shallow_sea",
"/",
"2.0",
"ocean_points",
"=",
"numpy",
".",
"logical_and",
"(",
"elevation",
"<",
"shallo... | The goal of this function is to make the ocean floor less noisy.
The underwater erosion should cause the ocean floor to be more uniform | [
"The",
"goal",
"of",
"this",
"function",
"is",
"to",
"make",
"the",
"ocean",
"floor",
"less",
"noisy",
".",
"The",
"underwater",
"erosion",
"should",
"cause",
"the",
"ocean",
"floor",
"to",
"be",
"more",
"uniform"
] | 64dff8eb7824ce46b5b6cb8006bcef21822ef144 | https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/generation.py#L122-L137 | train | 208,490 |
pypa/setuptools_scm | src/setuptools_scm/utils.py | _always_strings | def _always_strings(env_dict):
"""
On Windows and Python 2, environment dictionaries must be strings
and not unicode.
"""
if IS_WINDOWS or PY2:
env_dict.update((key, str(value)) for (key, value) in env_dict.items())
return env_dict | python | def _always_strings(env_dict):
"""
On Windows and Python 2, environment dictionaries must be strings
and not unicode.
"""
if IS_WINDOWS or PY2:
env_dict.update((key, str(value)) for (key, value) in env_dict.items())
return env_dict | [
"def",
"_always_strings",
"(",
"env_dict",
")",
":",
"if",
"IS_WINDOWS",
"or",
"PY2",
":",
"env_dict",
".",
"update",
"(",
"(",
"key",
",",
"str",
"(",
"value",
")",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"env_dict",
".",
"items",
"(",
")... | On Windows and Python 2, environment dictionaries must be strings
and not unicode. | [
"On",
"Windows",
"and",
"Python",
"2",
"environment",
"dictionaries",
"must",
"be",
"strings",
"and",
"not",
"unicode",
"."
] | 6e99dc1afdce38dc0c3fb1a31bd50a152fc55cce | https://github.com/pypa/setuptools_scm/blob/6e99dc1afdce38dc0c3fb1a31bd50a152fc55cce/src/setuptools_scm/utils.py#L35-L42 | train | 208,491 |
pypa/setuptools_scm | src/setuptools_scm/win_py31_compat.py | compat_stat | def compat_stat(path):
"""
Generate stat as found on Python 3.2 and later.
"""
stat = os.stat(path)
info = get_file_info(path)
# rewrite st_ino, st_dev, and st_nlink based on file info
return nt.stat_result(
(stat.st_mode,) +
(info.file_index, info.volume_serial_number, info.number_of_links) +
stat[4:]
) | python | def compat_stat(path):
"""
Generate stat as found on Python 3.2 and later.
"""
stat = os.stat(path)
info = get_file_info(path)
# rewrite st_ino, st_dev, and st_nlink based on file info
return nt.stat_result(
(stat.st_mode,) +
(info.file_index, info.volume_serial_number, info.number_of_links) +
stat[4:]
) | [
"def",
"compat_stat",
"(",
"path",
")",
":",
"stat",
"=",
"os",
".",
"stat",
"(",
"path",
")",
"info",
"=",
"get_file_info",
"(",
"path",
")",
"# rewrite st_ino, st_dev, and st_nlink based on file info",
"return",
"nt",
".",
"stat_result",
"(",
"(",
"stat",
".... | Generate stat as found on Python 3.2 and later. | [
"Generate",
"stat",
"as",
"found",
"on",
"Python",
"3",
".",
"2",
"and",
"later",
"."
] | 6e99dc1afdce38dc0c3fb1a31bd50a152fc55cce | https://github.com/pypa/setuptools_scm/blob/6e99dc1afdce38dc0c3fb1a31bd50a152fc55cce/src/setuptools_scm/win_py31_compat.py#L163-L174 | train | 208,492 |
pypa/setuptools_scm | src/setuptools_scm/file_finder.py | scm_find_files | def scm_find_files(path, scm_files, scm_dirs):
""" setuptools compatible file finder that follows symlinks
- path: the root directory from which to search
- scm_files: set of scm controlled files and symlinks
(including symlinks to directories)
- scm_dirs: set of scm controlled directories
(including directories containing no scm controlled files)
scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
with normalized case (normcase)
Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
adding-support-for-revision-control-systems
"""
realpath = os.path.normcase(os.path.realpath(path))
seen = set()
res = []
for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# dirpath with symlinks resolved
realdirpath = os.path.normcase(os.path.realpath(dirpath))
def _link_not_in_scm(n):
fn = os.path.join(realdirpath, os.path.normcase(n))
return os.path.islink(fn) and fn not in scm_files
if realdirpath not in scm_dirs:
# directory not in scm, don't walk it's content
dirnames[:] = []
continue
if (
os.path.islink(dirpath)
and not os.path.relpath(realdirpath, realpath).startswith(os.pardir)
):
# a symlink to a directory not outside path:
# we keep it in the result and don't walk its content
res.append(os.path.join(path, os.path.relpath(dirpath, path)))
dirnames[:] = []
continue
if realdirpath in seen:
# symlink loop protection
dirnames[:] = []
continue
dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
for filename in filenames:
if _link_not_in_scm(filename):
continue
# dirpath + filename with symlinks preserved
fullfilename = os.path.join(dirpath, filename)
if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
res.append(os.path.join(path, os.path.relpath(fullfilename, path)))
seen.add(realdirpath)
return res | python | def scm_find_files(path, scm_files, scm_dirs):
""" setuptools compatible file finder that follows symlinks
- path: the root directory from which to search
- scm_files: set of scm controlled files and symlinks
(including symlinks to directories)
- scm_dirs: set of scm controlled directories
(including directories containing no scm controlled files)
scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
with normalized case (normcase)
Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
adding-support-for-revision-control-systems
"""
realpath = os.path.normcase(os.path.realpath(path))
seen = set()
res = []
for dirpath, dirnames, filenames in os.walk(realpath, followlinks=True):
# dirpath with symlinks resolved
realdirpath = os.path.normcase(os.path.realpath(dirpath))
def _link_not_in_scm(n):
fn = os.path.join(realdirpath, os.path.normcase(n))
return os.path.islink(fn) and fn not in scm_files
if realdirpath not in scm_dirs:
# directory not in scm, don't walk it's content
dirnames[:] = []
continue
if (
os.path.islink(dirpath)
and not os.path.relpath(realdirpath, realpath).startswith(os.pardir)
):
# a symlink to a directory not outside path:
# we keep it in the result and don't walk its content
res.append(os.path.join(path, os.path.relpath(dirpath, path)))
dirnames[:] = []
continue
if realdirpath in seen:
# symlink loop protection
dirnames[:] = []
continue
dirnames[:] = [dn for dn in dirnames if not _link_not_in_scm(dn)]
for filename in filenames:
if _link_not_in_scm(filename):
continue
# dirpath + filename with symlinks preserved
fullfilename = os.path.join(dirpath, filename)
if os.path.normcase(os.path.realpath(fullfilename)) in scm_files:
res.append(os.path.join(path, os.path.relpath(fullfilename, path)))
seen.add(realdirpath)
return res | [
"def",
"scm_find_files",
"(",
"path",
",",
"scm_files",
",",
"scm_dirs",
")",
":",
"realpath",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"path",
")",
")",
"seen",
"=",
"set",
"(",
")",
"res",
"=",
"[",
... | setuptools compatible file finder that follows symlinks
- path: the root directory from which to search
- scm_files: set of scm controlled files and symlinks
(including symlinks to directories)
- scm_dirs: set of scm controlled directories
(including directories containing no scm controlled files)
scm_files and scm_dirs must be absolute with symlinks resolved (realpath),
with normalized case (normcase)
Spec here: http://setuptools.readthedocs.io/en/latest/setuptools.html#\
adding-support-for-revision-control-systems | [
"setuptools",
"compatible",
"file",
"finder",
"that",
"follows",
"symlinks"
] | 6e99dc1afdce38dc0c3fb1a31bd50a152fc55cce | https://github.com/pypa/setuptools_scm/blob/6e99dc1afdce38dc0c3fb1a31bd50a152fc55cce/src/setuptools_scm/file_finder.py#L4-L56 | train | 208,493 |
cartologic/cartoview | cartoview/apps_handler/handlers.py | AppsORM.session | def session(self):
""" Creates a context with an open SQLAlchemy session.
"""
engine = self.engine
connection = engine.connect()
db_session = scoped_session(
sessionmaker(autocommit=False, autoflush=True, bind=engine))
yield db_session
db_session.close()
connection.close() | python | def session(self):
""" Creates a context with an open SQLAlchemy session.
"""
engine = self.engine
connection = engine.connect()
db_session = scoped_session(
sessionmaker(autocommit=False, autoflush=True, bind=engine))
yield db_session
db_session.close()
connection.close() | [
"def",
"session",
"(",
"self",
")",
":",
"engine",
"=",
"self",
".",
"engine",
"connection",
"=",
"engine",
".",
"connect",
"(",
")",
"db_session",
"=",
"scoped_session",
"(",
"sessionmaker",
"(",
"autocommit",
"=",
"False",
",",
"autoflush",
"=",
"True",
... | Creates a context with an open SQLAlchemy session. | [
"Creates",
"a",
"context",
"with",
"an",
"open",
"SQLAlchemy",
"session",
"."
] | 8eea73a7e363ac806dbfca3ca61f7e9d2c839b6b | https://github.com/cartologic/cartoview/blob/8eea73a7e363ac806dbfca3ca61f7e9d2c839b6b/cartoview/apps_handler/handlers.py#L29-L38 | train | 208,494 |
cartologic/cartoview | pavement.py | kill | def kill(arg1, arg2):
"""Stops a proces that contains arg1 and is filtered by arg2
"""
from subprocess import Popen, PIPE
# Wait until ready
t0 = time.time()
# Wait no more than these many seconds
time_out = 30
running = True
while running and time.time() - t0 < time_out:
if os.name == 'nt':
p = Popen(
'tasklist | find "%s"' % arg1,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=False)
else:
p = Popen(
'ps aux | grep %s' % arg1,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=True)
lines = p.stdout.readlines()
running = False
for line in lines:
# this kills all java.exe and python including self in windows
if ('%s' % arg2 in line) or (os.name == 'nt'
and '%s' % arg1 in line):
running = True
# Get pid
fields = line.strip().split()
info('Stopping %s (process number %s)' % (arg1, fields[1]))
if os.name == 'nt':
kill = 'taskkill /F /PID "%s"' % fields[1]
else:
kill = 'kill -9 %s 2> /dev/null' % fields[1]
os.system(kill)
# Give it a little more time
time.sleep(1)
else:
pass
if running:
raise Exception('Could not stop %s: '
'Running processes are\n%s' % (arg1, '\n'.join(
[l.strip() for l in lines]))) | python | def kill(arg1, arg2):
"""Stops a proces that contains arg1 and is filtered by arg2
"""
from subprocess import Popen, PIPE
# Wait until ready
t0 = time.time()
# Wait no more than these many seconds
time_out = 30
running = True
while running and time.time() - t0 < time_out:
if os.name == 'nt':
p = Popen(
'tasklist | find "%s"' % arg1,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=False)
else:
p = Popen(
'ps aux | grep %s' % arg1,
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=True)
lines = p.stdout.readlines()
running = False
for line in lines:
# this kills all java.exe and python including self in windows
if ('%s' % arg2 in line) or (os.name == 'nt'
and '%s' % arg1 in line):
running = True
# Get pid
fields = line.strip().split()
info('Stopping %s (process number %s)' % (arg1, fields[1]))
if os.name == 'nt':
kill = 'taskkill /F /PID "%s"' % fields[1]
else:
kill = 'kill -9 %s 2> /dev/null' % fields[1]
os.system(kill)
# Give it a little more time
time.sleep(1)
else:
pass
if running:
raise Exception('Could not stop %s: '
'Running processes are\n%s' % (arg1, '\n'.join(
[l.strip() for l in lines]))) | [
"def",
"kill",
"(",
"arg1",
",",
"arg2",
")",
":",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
"# Wait until ready",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"# Wait no more than these many seconds",
"time_out",
"=",
"30",
"running",
"=",
"True",
... | Stops a proces that contains arg1 and is filtered by arg2 | [
"Stops",
"a",
"proces",
"that",
"contains",
"arg1",
"and",
"is",
"filtered",
"by",
"arg2"
] | 8eea73a7e363ac806dbfca3ca61f7e9d2c839b6b | https://github.com/cartologic/cartoview/blob/8eea73a7e363ac806dbfca3ca61f7e9d2c839b6b/pavement.py#L410-L466 | train | 208,495 |
klen/aioauth-client | aioauth_client.py | HmacSha1Signature.sign | def sign(self, consumer_secret, method, url, oauth_token_secret=None,
**params):
"""Create a signature using HMAC-SHA1."""
# build the url the same way aiohttp will build the query later on
# cf https://github.com/KeepSafe/aiohttp/blob/master/aiohttp/client.py#L151
# and https://github.com/KeepSafe/aiohttp/blob/master/aiohttp/client_reqrep.py#L81
url = yarl.URL(url).with_query(sorted(params.items()))
url, params = str(url).split('?', 1)
method = method.upper()
signature = b"&".join(map(self._escape, (method, url, params)))
key = self._escape(consumer_secret) + b"&"
if oauth_token_secret:
key += self._escape(oauth_token_secret)
hashed = hmac.new(key, signature, sha1)
return base64.b64encode(hashed.digest()).decode() | python | def sign(self, consumer_secret, method, url, oauth_token_secret=None,
**params):
"""Create a signature using HMAC-SHA1."""
# build the url the same way aiohttp will build the query later on
# cf https://github.com/KeepSafe/aiohttp/blob/master/aiohttp/client.py#L151
# and https://github.com/KeepSafe/aiohttp/blob/master/aiohttp/client_reqrep.py#L81
url = yarl.URL(url).with_query(sorted(params.items()))
url, params = str(url).split('?', 1)
method = method.upper()
signature = b"&".join(map(self._escape, (method, url, params)))
key = self._escape(consumer_secret) + b"&"
if oauth_token_secret:
key += self._escape(oauth_token_secret)
hashed = hmac.new(key, signature, sha1)
return base64.b64encode(hashed.digest()).decode() | [
"def",
"sign",
"(",
"self",
",",
"consumer_secret",
",",
"method",
",",
"url",
",",
"oauth_token_secret",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"# build the url the same way aiohttp will build the query later on",
"# cf https://github.com/KeepSafe/aiohttp/blob/mas... | Create a signature using HMAC-SHA1. | [
"Create",
"a",
"signature",
"using",
"HMAC",
"-",
"SHA1",
"."
] | 54f58249496c26965adb4f752f2b24cfe18d0084 | https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L60-L77 | train | 208,496 |
klen/aioauth-client | aioauth_client.py | PlaintextSignature.sign | def sign(self, consumer_secret, method, url, oauth_token_secret=None,
**params):
"""Create a signature using PLAINTEXT."""
key = self._escape(consumer_secret) + b'&'
if oauth_token_secret:
key += self._escape(oauth_token_secret)
return key.decode() | python | def sign(self, consumer_secret, method, url, oauth_token_secret=None,
**params):
"""Create a signature using PLAINTEXT."""
key = self._escape(consumer_secret) + b'&'
if oauth_token_secret:
key += self._escape(oauth_token_secret)
return key.decode() | [
"def",
"sign",
"(",
"self",
",",
"consumer_secret",
",",
"method",
",",
"url",
",",
"oauth_token_secret",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"key",
"=",
"self",
".",
"_escape",
"(",
"consumer_secret",
")",
"+",
"b'&'",
"if",
"oauth_token_sec... | Create a signature using PLAINTEXT. | [
"Create",
"a",
"signature",
"using",
"PLAINTEXT",
"."
] | 54f58249496c26965adb4f752f2b24cfe18d0084 | https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L85-L91 | train | 208,497 |
klen/aioauth-client | aioauth_client.py | Client._get_url | def _get_url(self, url):
"""Build provider's url. Join with base_url part if needed."""
if self.base_url and not url.startswith(('http://', 'https://')):
return urljoin(self.base_url, url)
return url | python | def _get_url(self, url):
"""Build provider's url. Join with base_url part if needed."""
if self.base_url and not url.startswith(('http://', 'https://')):
return urljoin(self.base_url, url)
return url | [
"def",
"_get_url",
"(",
"self",
",",
"url",
")",
":",
"if",
"self",
".",
"base_url",
"and",
"not",
"url",
".",
"startswith",
"(",
"(",
"'http://'",
",",
"'https://'",
")",
")",
":",
"return",
"urljoin",
"(",
"self",
".",
"base_url",
",",
"url",
")",
... | Build provider's url. Join with base_url part if needed. | [
"Build",
"provider",
"s",
"url",
".",
"Join",
"with",
"base_url",
"part",
"if",
"needed",
"."
] | 54f58249496c26965adb4f752f2b24cfe18d0084 | https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L127-L131 | train | 208,498 |
klen/aioauth-client | aioauth_client.py | Client._request | async def _request(self, method, url, loop=None, timeout=None, **kwargs):
"""Make a request through AIOHTTP."""
session = self.session or aiohttp.ClientSession(
loop=loop, conn_timeout=timeout, read_timeout=timeout)
try:
async with session.request(method, url, **kwargs) as response:
if response.status / 100 > 2:
raise web.HTTPBadRequest(
reason='HTTP status code: %s' % response.status)
if 'json' in response.headers.get('CONTENT-TYPE'):
data = await response.json()
else:
data = await response.text()
data = dict(parse_qsl(data))
return data
except asyncio.TimeoutError:
raise web.HTTPBadRequest(reason='HTTP Timeout')
finally:
if not self.session and not session.closed:
await session.close() | python | async def _request(self, method, url, loop=None, timeout=None, **kwargs):
"""Make a request through AIOHTTP."""
session = self.session or aiohttp.ClientSession(
loop=loop, conn_timeout=timeout, read_timeout=timeout)
try:
async with session.request(method, url, **kwargs) as response:
if response.status / 100 > 2:
raise web.HTTPBadRequest(
reason='HTTP status code: %s' % response.status)
if 'json' in response.headers.get('CONTENT-TYPE'):
data = await response.json()
else:
data = await response.text()
data = dict(parse_qsl(data))
return data
except asyncio.TimeoutError:
raise web.HTTPBadRequest(reason='HTTP Timeout')
finally:
if not self.session and not session.closed:
await session.close() | [
"async",
"def",
"_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"loop",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"session",
"=",
"self",
".",
"session",
"or",
"aiohttp",
".",
"ClientSession",
"(",
"loop",
... | Make a request through AIOHTTP. | [
"Make",
"a",
"request",
"through",
"AIOHTTP",
"."
] | 54f58249496c26965adb4f752f2b24cfe18d0084 | https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L141-L165 | train | 208,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.