repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
google/python-gflags | gflags/_helpers.py | define_both_methods | def define_both_methods(class_name, class_dict, old_name, new_name): # pylint: disable=invalid-name
"""Function to help CamelCase to PEP8 style class methods migration.
For any class definition:
1. Assert it does not define both old and new methods,
otherwise it does not work.
2. If it defines the old method, create the same new method.
3. If it defines the new method, create the same old method.
Args:
class_name: the class name.
class_dict: the class dictionary.
old_name: old method's name.
new_name: new method's name.
Raises:
AssertionError: raised when the class defines both the old_name and
new_name.
"""
assert old_name not in class_dict or new_name not in class_dict, (
'Class "{}" cannot define both "{}" and "{}" methods.'.format(
class_name, old_name, new_name))
if old_name in class_dict:
class_dict[new_name] = class_dict[old_name]
elif new_name in class_dict:
class_dict[old_name] = class_dict[new_name] | python | def define_both_methods(class_name, class_dict, old_name, new_name): # pylint: disable=invalid-name
"""Function to help CamelCase to PEP8 style class methods migration.
For any class definition:
1. Assert it does not define both old and new methods,
otherwise it does not work.
2. If it defines the old method, create the same new method.
3. If it defines the new method, create the same old method.
Args:
class_name: the class name.
class_dict: the class dictionary.
old_name: old method's name.
new_name: new method's name.
Raises:
AssertionError: raised when the class defines both the old_name and
new_name.
"""
assert old_name not in class_dict or new_name not in class_dict, (
'Class "{}" cannot define both "{}" and "{}" methods.'.format(
class_name, old_name, new_name))
if old_name in class_dict:
class_dict[new_name] = class_dict[old_name]
elif new_name in class_dict:
class_dict[old_name] = class_dict[new_name] | [
"def",
"define_both_methods",
"(",
"class_name",
",",
"class_dict",
",",
"old_name",
",",
"new_name",
")",
":",
"# pylint: disable=invalid-name",
"assert",
"old_name",
"not",
"in",
"class_dict",
"or",
"new_name",
"not",
"in",
"class_dict",
",",
"(",
"'Class \"{}\" c... | Function to help CamelCase to PEP8 style class methods migration.
For any class definition:
1. Assert it does not define both old and new methods,
otherwise it does not work.
2. If it defines the old method, create the same new method.
3. If it defines the new method, create the same old method.
Args:
class_name: the class name.
class_dict: the class dictionary.
old_name: old method's name.
new_name: new method's name.
Raises:
AssertionError: raised when the class defines both the old_name and
new_name. | [
"Function",
"to",
"help",
"CamelCase",
"to",
"PEP8",
"style",
"class",
"methods",
"migration",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/_helpers.py#L405-L430 | train | 22,900 |
google/python-gflags | gflags/flagvalues.py | FlagValues._IsUnparsedFlagAccessAllowed | def _IsUnparsedFlagAccessAllowed(self, name):
"""Determine whether to allow unparsed flag access or not."""
if _UNPARSED_FLAG_ACCESS_ENV_NAME in os.environ:
# We've been told explicitly what to do.
allow_unparsed_flag_access = (
os.getenv(_UNPARSED_FLAG_ACCESS_ENV_NAME) == '1')
elif self.__dict__['__reset_called']:
# Raise exception if .Reset() was called. This mostly happens in tests.
allow_unparsed_flag_access = False
elif _helpers.IsRunningTest():
# Staged "rollout", based on name of the flag so that we don't break
# everyone. Hashing the flag is a way of choosing a random but
# consistent subset of flags to lock down which we can make larger
# over time.
name_bytes = name.encode('utf8') if not isinstance(name, bytes) else name
flag_percentile = (
struct.unpack('<I', hashlib.md5(name_bytes).digest()[:4])[0] % 100)
allow_unparsed_flag_access = (
_UNPARSED_ACCESS_DISABLED_PERCENT <= flag_percentile)
else:
allow_unparsed_flag_access = True
return allow_unparsed_flag_access | python | def _IsUnparsedFlagAccessAllowed(self, name):
"""Determine whether to allow unparsed flag access or not."""
if _UNPARSED_FLAG_ACCESS_ENV_NAME in os.environ:
# We've been told explicitly what to do.
allow_unparsed_flag_access = (
os.getenv(_UNPARSED_FLAG_ACCESS_ENV_NAME) == '1')
elif self.__dict__['__reset_called']:
# Raise exception if .Reset() was called. This mostly happens in tests.
allow_unparsed_flag_access = False
elif _helpers.IsRunningTest():
# Staged "rollout", based on name of the flag so that we don't break
# everyone. Hashing the flag is a way of choosing a random but
# consistent subset of flags to lock down which we can make larger
# over time.
name_bytes = name.encode('utf8') if not isinstance(name, bytes) else name
flag_percentile = (
struct.unpack('<I', hashlib.md5(name_bytes).digest()[:4])[0] % 100)
allow_unparsed_flag_access = (
_UNPARSED_ACCESS_DISABLED_PERCENT <= flag_percentile)
else:
allow_unparsed_flag_access = True
return allow_unparsed_flag_access | [
"def",
"_IsUnparsedFlagAccessAllowed",
"(",
"self",
",",
"name",
")",
":",
"if",
"_UNPARSED_FLAG_ACCESS_ENV_NAME",
"in",
"os",
".",
"environ",
":",
"# We've been told explicitly what to do.",
"allow_unparsed_flag_access",
"=",
"(",
"os",
".",
"getenv",
"(",
"_UNPARSED_F... | Determine whether to allow unparsed flag access or not. | [
"Determine",
"whether",
"to",
"allow",
"unparsed",
"flag",
"access",
"or",
"not",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L490-L511 | train | 22,901 |
google/python-gflags | gflags/flagvalues.py | FlagValues._AssertValidators | def _AssertValidators(self, validators):
"""Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValueError: if validation fails for at least one validator
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.verify(self)
except exceptions.ValidationError as e:
message = validator.print_flags_with_values(self)
raise exceptions.IllegalFlagValueError('%s: %s' % (message, str(e))) | python | def _AssertValidators(self, validators):
"""Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValueError: if validation fails for at least one validator
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.verify(self)
except exceptions.ValidationError as e:
message = validator.print_flags_with_values(self)
raise exceptions.IllegalFlagValueError('%s: %s' % (message, str(e))) | [
"def",
"_AssertValidators",
"(",
"self",
",",
"validators",
")",
":",
"for",
"validator",
"in",
"sorted",
"(",
"validators",
",",
"key",
"=",
"lambda",
"validator",
":",
"validator",
".",
"insertion_index",
")",
":",
"try",
":",
"validator",
".",
"verify",
... | Assert if all validators in the list are satisfied.
Asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified
Raises:
AttributeError: if validators work with a non-existing flag.
IllegalFlagValueError: if validation fails for at least one validator | [
"Assert",
"if",
"all",
"validators",
"in",
"the",
"list",
"are",
"satisfied",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L561-L578 | train | 22,902 |
google/python-gflags | gflags/flagvalues.py | FlagValues._RemoveAllFlagAppearances | def _RemoveAllFlagAppearances(self, name):
"""Removes flag with name for all appearances.
A flag can be registered with its long name and an optional short name.
This method removes both of them. This is different than __delattr__.
Args:
name: Either flag's long name or short name.
Raises:
UnrecognizedFlagError: When flag name is not found.
"""
flag_dict = self.FlagDict()
if name not in flag_dict:
raise exceptions.UnrecognizedFlagError(name)
flag = flag_dict[name]
names_to_remove = {name}
names_to_remove.add(flag.name)
if flag.short_name:
names_to_remove.add(flag.short_name)
for n in names_to_remove:
self.__delattr__(n) | python | def _RemoveAllFlagAppearances(self, name):
"""Removes flag with name for all appearances.
A flag can be registered with its long name and an optional short name.
This method removes both of them. This is different than __delattr__.
Args:
name: Either flag's long name or short name.
Raises:
UnrecognizedFlagError: When flag name is not found.
"""
flag_dict = self.FlagDict()
if name not in flag_dict:
raise exceptions.UnrecognizedFlagError(name)
flag = flag_dict[name]
names_to_remove = {name}
names_to_remove.add(flag.name)
if flag.short_name:
names_to_remove.add(flag.short_name)
for n in names_to_remove:
self.__delattr__(n) | [
"def",
"_RemoveAllFlagAppearances",
"(",
"self",
",",
"name",
")",
":",
"flag_dict",
"=",
"self",
".",
"FlagDict",
"(",
")",
"if",
"name",
"not",
"in",
"flag_dict",
":",
"raise",
"exceptions",
".",
"UnrecognizedFlagError",
"(",
"name",
")",
"flag",
"=",
"f... | Removes flag with name for all appearances.
A flag can be registered with its long name and an optional short name.
This method removes both of them. This is different than __delattr__.
Args:
name: Either flag's long name or short name.
Raises:
UnrecognizedFlagError: When flag name is not found. | [
"Removes",
"flag",
"with",
"name",
"for",
"all",
"appearances",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L610-L631 | train | 22,903 |
google/python-gflags | gflags/flagvalues.py | FlagValues.GetHelp | def GetHelp(self, prefix='', include_special_flags=True):
"""Generates a help string for all known flags.
Args:
prefix: str, per-line output prefix.
include_special_flags: bool, whether to include description of
_SPECIAL_FLAGS, i.e. --flagfile and --undefok.
Returns:
str, formatted help message.
"""
# TODO(vrusinov): this function needs a test.
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = sorted(flags_by_module)
# Print the help for the main module first, if possible.
main_module = sys.argv[0]
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
if include_special_flags:
self.__RenderModuleFlags('gflags',
_helpers.SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
# Just print one long list of flags.
values = self.FlagDict().values()
if include_special_flags:
values.append(_helpers.SPECIAL_FLAGS.FlagDict().values())
self.__RenderFlagList(values, helplist, prefix)
return '\n'.join(helplist) | python | def GetHelp(self, prefix='', include_special_flags=True):
"""Generates a help string for all known flags.
Args:
prefix: str, per-line output prefix.
include_special_flags: bool, whether to include description of
_SPECIAL_FLAGS, i.e. --flagfile and --undefok.
Returns:
str, formatted help message.
"""
# TODO(vrusinov): this function needs a test.
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = sorted(flags_by_module)
# Print the help for the main module first, if possible.
main_module = sys.argv[0]
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
if include_special_flags:
self.__RenderModuleFlags('gflags',
_helpers.SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
# Just print one long list of flags.
values = self.FlagDict().values()
if include_special_flags:
values.append(_helpers.SPECIAL_FLAGS.FlagDict().values())
self.__RenderFlagList(values, helplist, prefix)
return '\n'.join(helplist) | [
"def",
"GetHelp",
"(",
"self",
",",
"prefix",
"=",
"''",
",",
"include_special_flags",
"=",
"True",
")",
":",
"# TODO(vrusinov): this function needs a test.",
"helplist",
"=",
"[",
"]",
"flags_by_module",
"=",
"self",
".",
"FlagsByModuleDict",
"(",
")",
"if",
"f... | Generates a help string for all known flags.
Args:
prefix: str, per-line output prefix.
include_special_flags: bool, whether to include description of
_SPECIAL_FLAGS, i.e. --flagfile and --undefok.
Returns:
str, formatted help message. | [
"Generates",
"a",
"help",
"string",
"for",
"all",
"known",
"flags",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L849-L886 | train | 22,904 |
google/python-gflags | gflags/flagvalues.py | FlagValues.__RenderOurModuleKeyFlags | def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=''):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix) | python | def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=''):
"""Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line.
"""
key_flags = self._GetKeyFlagsForModule(module)
if key_flags:
self.__RenderModuleFlags(module, key_flags, output_lines, prefix) | [
"def",
"__RenderOurModuleKeyFlags",
"(",
"self",
",",
"module",
",",
"output_lines",
",",
"prefix",
"=",
"''",
")",
":",
"key_flags",
"=",
"self",
".",
"_GetKeyFlagsForModule",
"(",
"module",
")",
"if",
"key_flags",
":",
"self",
".",
"__RenderModuleFlags",
"("... | Generates a help string for the key flags of a given module.
Args:
module: A module object or a module name (a string).
output_lines: A list of strings. The generated help message
lines will be appended to this list.
prefix: A string that is prepended to each generated help line. | [
"Generates",
"a",
"help",
"string",
"for",
"the",
"key",
"flags",
"of",
"a",
"given",
"module",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L901-L912 | train | 22,905 |
google/python-gflags | gflags/flagvalues.py | FlagValues.ModuleHelp | def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist) | python | def ModuleHelp(self, module):
"""Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module.
"""
helplist = []
self.__RenderOurModuleKeyFlags(module, helplist)
return '\n'.join(helplist) | [
"def",
"ModuleHelp",
"(",
"self",
",",
"module",
")",
":",
"helplist",
"=",
"[",
"]",
"self",
".",
"__RenderOurModuleKeyFlags",
"(",
"module",
",",
"helplist",
")",
"return",
"'\\n'",
".",
"join",
"(",
"helplist",
")"
] | Describe the key flags of a module.
Args:
module: A module object or a module name (a string).
Returns:
string describing the key flags of a module. | [
"Describe",
"the",
"key",
"flags",
"of",
"a",
"module",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/flagvalues.py#L914-L925 | train | 22,906 |
google/python-gflags | gflags/__init__.py | register_multi_flags_validator | def register_multi_flags_validator(flag_names,
multi_flags_checker,
message='Flags validation failed',
flag_values=FLAGS):
"""Adds a constraint to multiple flags.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_names: [str], a list of the flag names to be checked.
multi_flags_checker: callable, a function to validate the flag.
input - dictionary, with keys() being flag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise gflags.ValidationError.
message: Error text to be shown to the user if checker returns False.
If checker raises gflags.ValidationError, message from the raised error
will be shown.
flag_values: An optional FlagValues instance to validate against.
Raises:
AttributeError: If a flag is not registered as a valid flag name.
"""
v = gflags_validators.MultiFlagsValidator(
flag_names, multi_flags_checker, message)
_add_validator(flag_values, v) | python | def register_multi_flags_validator(flag_names,
multi_flags_checker,
message='Flags validation failed',
flag_values=FLAGS):
"""Adds a constraint to multiple flags.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_names: [str], a list of the flag names to be checked.
multi_flags_checker: callable, a function to validate the flag.
input - dictionary, with keys() being flag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise gflags.ValidationError.
message: Error text to be shown to the user if checker returns False.
If checker raises gflags.ValidationError, message from the raised error
will be shown.
flag_values: An optional FlagValues instance to validate against.
Raises:
AttributeError: If a flag is not registered as a valid flag name.
"""
v = gflags_validators.MultiFlagsValidator(
flag_names, multi_flags_checker, message)
_add_validator(flag_values, v) | [
"def",
"register_multi_flags_validator",
"(",
"flag_names",
",",
"multi_flags_checker",
",",
"message",
"=",
"'Flags validation failed'",
",",
"flag_values",
"=",
"FLAGS",
")",
":",
"v",
"=",
"gflags_validators",
".",
"MultiFlagsValidator",
"(",
"flag_names",
",",
"mu... | Adds a constraint to multiple flags.
The constraint is validated when flags are initially parsed, and after each
change of the corresponding flag's value.
Args:
flag_names: [str], a list of the flag names to be checked.
multi_flags_checker: callable, a function to validate the flag.
input - dictionary, with keys() being flag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise gflags.ValidationError.
message: Error text to be shown to the user if checker returns False.
If checker raises gflags.ValidationError, message from the raised error
will be shown.
flag_values: An optional FlagValues instance to validate against.
Raises:
AttributeError: If a flag is not registered as a valid flag name. | [
"Adds",
"a",
"constraint",
"to",
"multiple",
"flags",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L187-L214 | train | 22,907 |
google/python-gflags | gflags/__init__.py | multi_flags_validator | def multi_flags_validator(flag_names,
message='Flag validation failed',
flag_values=FLAGS):
"""A function decorator for defining a multi-flag validator.
Registers the decorated function as a validator for flag_names, e.g.
@gflags.multi_flags_validator(['foo', 'bar'])
def _CheckFooBar(flags_dict):
...
See register_multi_flags_validator() for the specification of checker
function.
Args:
flag_names: [str], a list of the flag names to be checked.
message: error text to be shown to the user if checker returns False.
If checker raises ValidationError, message from the raised
error will be shown.
flag_values: An optional FlagValues instance to validate against.
Returns:
A function decorator that registers its function argument as a validator.
Raises:
AttributeError: If a flag is not registered as a valid flag name.
"""
def decorate(function):
register_multi_flags_validator(flag_names,
function,
message=message,
flag_values=flag_values)
return function
return decorate | python | def multi_flags_validator(flag_names,
message='Flag validation failed',
flag_values=FLAGS):
"""A function decorator for defining a multi-flag validator.
Registers the decorated function as a validator for flag_names, e.g.
@gflags.multi_flags_validator(['foo', 'bar'])
def _CheckFooBar(flags_dict):
...
See register_multi_flags_validator() for the specification of checker
function.
Args:
flag_names: [str], a list of the flag names to be checked.
message: error text to be shown to the user if checker returns False.
If checker raises ValidationError, message from the raised
error will be shown.
flag_values: An optional FlagValues instance to validate against.
Returns:
A function decorator that registers its function argument as a validator.
Raises:
AttributeError: If a flag is not registered as a valid flag name.
"""
def decorate(function):
register_multi_flags_validator(flag_names,
function,
message=message,
flag_values=flag_values)
return function
return decorate | [
"def",
"multi_flags_validator",
"(",
"flag_names",
",",
"message",
"=",
"'Flag validation failed'",
",",
"flag_values",
"=",
"FLAGS",
")",
":",
"def",
"decorate",
"(",
"function",
")",
":",
"register_multi_flags_validator",
"(",
"flag_names",
",",
"function",
",",
... | A function decorator for defining a multi-flag validator.
Registers the decorated function as a validator for flag_names, e.g.
@gflags.multi_flags_validator(['foo', 'bar'])
def _CheckFooBar(flags_dict):
...
See register_multi_flags_validator() for the specification of checker
function.
Args:
flag_names: [str], a list of the flag names to be checked.
message: error text to be shown to the user if checker returns False.
If checker raises ValidationError, message from the raised
error will be shown.
flag_values: An optional FlagValues instance to validate against.
Returns:
A function decorator that registers its function argument as a validator.
Raises:
AttributeError: If a flag is not registered as a valid flag name. | [
"A",
"function",
"decorator",
"for",
"defining",
"a",
"multi",
"-",
"flag",
"validator",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L217-L252 | train | 22,908 |
google/python-gflags | gflags/__init__.py | mark_flag_as_required | def mark_flag_as_required(flag_name, flag_values=FLAGS):
"""Ensures that flag is not None during program execution.
Registers a flag validator, which will follow usual validator rules.
Important note: validator will pass for any non-None value, such as False,
0 (zero), '' (empty string) and so on.
It is recommended to call this method like this:
if __name__ == '__main__':
gflags.mark_flag_as_required('your_flag_name')
app.run()
Because validation happens at app.run() we want to ensure required-ness
is enforced at that time. However, you generally do not want to force
users who import your code to have additional required flags for their
own binaries or tests.
Args:
flag_name: string, name of the flag
flag_values: FlagValues
Raises:
AttributeError: if flag_name is not registered as a valid flag name.
"""
if flag_values[flag_name].default is not None:
# TODO(vrusinov): Turn this warning into an exception.
warnings.warn(
'Flag %s has a non-None default value; therefore, '
'mark_flag_as_required will pass even if flag is not specified in the '
'command line!' % flag_name)
register_validator(flag_name,
lambda value: value is not None,
message='Flag --%s must be specified.' % flag_name,
flag_values=flag_values) | python | def mark_flag_as_required(flag_name, flag_values=FLAGS):
"""Ensures that flag is not None during program execution.
Registers a flag validator, which will follow usual validator rules.
Important note: validator will pass for any non-None value, such as False,
0 (zero), '' (empty string) and so on.
It is recommended to call this method like this:
if __name__ == '__main__':
gflags.mark_flag_as_required('your_flag_name')
app.run()
Because validation happens at app.run() we want to ensure required-ness
is enforced at that time. However, you generally do not want to force
users who import your code to have additional required flags for their
own binaries or tests.
Args:
flag_name: string, name of the flag
flag_values: FlagValues
Raises:
AttributeError: if flag_name is not registered as a valid flag name.
"""
if flag_values[flag_name].default is not None:
# TODO(vrusinov): Turn this warning into an exception.
warnings.warn(
'Flag %s has a non-None default value; therefore, '
'mark_flag_as_required will pass even if flag is not specified in the '
'command line!' % flag_name)
register_validator(flag_name,
lambda value: value is not None,
message='Flag --%s must be specified.' % flag_name,
flag_values=flag_values) | [
"def",
"mark_flag_as_required",
"(",
"flag_name",
",",
"flag_values",
"=",
"FLAGS",
")",
":",
"if",
"flag_values",
"[",
"flag_name",
"]",
".",
"default",
"is",
"not",
"None",
":",
"# TODO(vrusinov): Turn this warning into an exception.",
"warnings",
".",
"warn",
"("... | Ensures that flag is not None during program execution.
Registers a flag validator, which will follow usual validator rules.
Important note: validator will pass for any non-None value, such as False,
0 (zero), '' (empty string) and so on.
It is recommended to call this method like this:
if __name__ == '__main__':
gflags.mark_flag_as_required('your_flag_name')
app.run()
Because validation happens at app.run() we want to ensure required-ness
is enforced at that time. However, you generally do not want to force
users who import your code to have additional required flags for their
own binaries or tests.
Args:
flag_name: string, name of the flag
flag_values: FlagValues
Raises:
AttributeError: if flag_name is not registered as a valid flag name. | [
"Ensures",
"that",
"flag",
"is",
"not",
"None",
"during",
"program",
"execution",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L255-L288 | train | 22,909 |
google/python-gflags | gflags/__init__.py | mark_flags_as_mutual_exclusive | def mark_flags_as_mutual_exclusive(flag_names, required=False,
flag_values=FLAGS):
"""Ensures that only one flag among flag_names is set.
Args:
flag_names: [str], a list of the flag names to be checked.
required: Boolean, if set, exactly one of the flags must be set.
Otherwise, it is also valid for none of the flags to be set.
flag_values: An optional FlagValues instance to validate against.
"""
def validate_mutual_exclusion(flags_dict):
flag_count = sum(1 for val in flags_dict.values() if val is not None)
if flag_count == 1 or (not required and flag_count == 0):
return True
message = ('%s one of (%s) must be specified.' %
('Exactly' if required else 'At most', ', '.join(flag_names)))
raise ValidationError(message)
register_multi_flags_validator(
flag_names, validate_mutual_exclusion, flag_values=flag_values) | python | def mark_flags_as_mutual_exclusive(flag_names, required=False,
flag_values=FLAGS):
"""Ensures that only one flag among flag_names is set.
Args:
flag_names: [str], a list of the flag names to be checked.
required: Boolean, if set, exactly one of the flags must be set.
Otherwise, it is also valid for none of the flags to be set.
flag_values: An optional FlagValues instance to validate against.
"""
def validate_mutual_exclusion(flags_dict):
flag_count = sum(1 for val in flags_dict.values() if val is not None)
if flag_count == 1 or (not required and flag_count == 0):
return True
message = ('%s one of (%s) must be specified.' %
('Exactly' if required else 'At most', ', '.join(flag_names)))
raise ValidationError(message)
register_multi_flags_validator(
flag_names, validate_mutual_exclusion, flag_values=flag_values) | [
"def",
"mark_flags_as_mutual_exclusive",
"(",
"flag_names",
",",
"required",
"=",
"False",
",",
"flag_values",
"=",
"FLAGS",
")",
":",
"def",
"validate_mutual_exclusion",
"(",
"flags_dict",
")",
":",
"flag_count",
"=",
"sum",
"(",
"1",
"for",
"val",
"in",
"fla... | Ensures that only one flag among flag_names is set.
Args:
flag_names: [str], a list of the flag names to be checked.
required: Boolean, if set, exactly one of the flags must be set.
Otherwise, it is also valid for none of the flags to be set.
flag_values: An optional FlagValues instance to validate against. | [
"Ensures",
"that",
"only",
"one",
"flag",
"among",
"flag_names",
"is",
"set",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L310-L330 | train | 22,910 |
google/python-gflags | gflags/__init__.py | DEFINE_enum | def DEFINE_enum( # pylint: disable=g-bad-name,redefined-builtin
name, default, enum_values, help, flag_values=FLAGS, module_name=None,
**args):
"""Registers a flag whose value can be any string from enum_values.
Args:
name: A string, the flag name.
default: The default value of the flag.
enum_values: A list of strings with the possible values for the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
flag_values, module_name) | python | def DEFINE_enum( # pylint: disable=g-bad-name,redefined-builtin
name, default, enum_values, help, flag_values=FLAGS, module_name=None,
**args):
"""Registers a flag whose value can be any string from enum_values.
Args:
name: A string, the flag name.
default: The default value of the flag.
enum_values: A list of strings with the possible values for the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args),
flag_values, module_name) | [
"def",
"DEFINE_enum",
"(",
"# pylint: disable=g-bad-name,redefined-builtin",
"name",
",",
"default",
",",
"enum_values",
",",
"help",
",",
"flag_values",
"=",
"FLAGS",
",",
"module_name",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"DEFINE_flag",
"(",
"EnumFla... | Registers a flag whose value can be any string from enum_values.
Args:
name: A string, the flag name.
default: The default value of the flag.
enum_values: A list of strings with the possible values for the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__. | [
"Registers",
"a",
"flag",
"whose",
"value",
"can",
"be",
"any",
"string",
"from",
"enum_values",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L639-L656 | train | 22,911 |
google/python-gflags | gflags/__init__.py | DEFINE_list | def DEFINE_list( # pylint: disable=g-bad-name,redefined-builtin
name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a comma-separated list of strings.
The flag value is parsed with a CSV parser.
Args:
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
parser = ListParser()
serializer = CsvListSerializer(',')
DEFINE(parser, name, default, help, flag_values, serializer, **args) | python | def DEFINE_list( # pylint: disable=g-bad-name,redefined-builtin
name, default, help, flag_values=FLAGS, **args):
"""Registers a flag whose value is a comma-separated list of strings.
The flag value is parsed with a CSV parser.
Args:
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
parser = ListParser()
serializer = CsvListSerializer(',')
DEFINE(parser, name, default, help, flag_values, serializer, **args) | [
"def",
"DEFINE_list",
"(",
"# pylint: disable=g-bad-name,redefined-builtin",
"name",
",",
"default",
",",
"help",
",",
"flag_values",
"=",
"FLAGS",
",",
"*",
"*",
"args",
")",
":",
"parser",
"=",
"ListParser",
"(",
")",
"serializer",
"=",
"CsvListSerializer",
"(... | Registers a flag whose value is a comma-separated list of strings.
The flag value is parsed with a CSV parser.
Args:
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__. | [
"Registers",
"a",
"flag",
"whose",
"value",
"is",
"a",
"comma",
"-",
"separated",
"list",
"of",
"strings",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L659-L675 | train | 22,912 |
google/python-gflags | gflags/__init__.py | DEFINE_multi | def DEFINE_multi( # pylint: disable=g-bad-name,redefined-builtin
parser, serializer, name, default, help, flag_values=FLAGS,
module_name=None, **args):
"""Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
serializer: ArgumentSerializer that serializes the flag value.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
flag_values, module_name) | python | def DEFINE_multi( # pylint: disable=g-bad-name,redefined-builtin
parser, serializer, name, default, help, flag_values=FLAGS,
module_name=None, **args):
"""Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
serializer: ArgumentSerializer that serializes the flag value.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args),
flag_values, module_name) | [
"def",
"DEFINE_multi",
"(",
"# pylint: disable=g-bad-name,redefined-builtin",
"parser",
",",
"serializer",
",",
"name",
",",
"default",
",",
"help",
",",
"flag_values",
"=",
"FLAGS",
",",
"module_name",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"DEFINE_flag"... | Registers a generic MultiFlag that parses its args with a given parser.
Auxiliary function. Normal users should NOT use it directly.
Developers who need to create their own 'Parser' classes for options
which can appear multiple times can call this module function to
register their flags.
Args:
parser: ArgumentParser that is used to parse the flag arguments.
serializer: ArgumentSerializer that serializes the flag value.
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the Python module declaring this flag.
If not provided, it will be computed using the stack trace of this call.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__. | [
"Registers",
"a",
"generic",
"MultiFlag",
"that",
"parses",
"its",
"args",
"with",
"a",
"given",
"parser",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L700-L724 | train | 22,913 |
google/python-gflags | gflags/__init__.py | DEFINE_multi_float | def DEFINE_multi_float( # pylint: disable=g-bad-name,redefined-builtin
name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of arbitrary floats.
Use the flag on the command line multiple times to place multiple
float values into the list. The 'default' may be a single float
(which will be converted into a single-element list) or a list of
floats.
Args:
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
lower_bound: float, min values of the flag.
upper_bound: float, max values of the flag.
flag_values: FlagValues object with which the flag will be registered.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) | python | def DEFINE_multi_float( # pylint: disable=g-bad-name,redefined-builtin
name, default, help, lower_bound=None, upper_bound=None,
flag_values=FLAGS, **args):
"""Registers a flag whose value can be a list of arbitrary floats.
Use the flag on the command line multiple times to place multiple
float values into the list. The 'default' may be a single float
(which will be converted into a single-element list) or a list of
floats.
Args:
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
lower_bound: float, min values of the flag.
upper_bound: float, max values of the flag.
flag_values: FlagValues object with which the flag will be registered.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__.
"""
parser = FloatParser(lower_bound, upper_bound)
serializer = ArgumentSerializer()
DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) | [
"def",
"DEFINE_multi_float",
"(",
"# pylint: disable=g-bad-name,redefined-builtin",
"name",
",",
"default",
",",
"help",
",",
"lower_bound",
"=",
"None",
",",
"upper_bound",
"=",
"None",
",",
"flag_values",
"=",
"FLAGS",
",",
"*",
"*",
"args",
")",
":",
"parser"... | Registers a flag whose value can be a list of arbitrary floats.
Use the flag on the command line multiple times to place multiple
float values into the list. The 'default' may be a single float
(which will be converted into a single-element list) or a list of
floats.
Args:
name: A string, the flag name.
default: The default value of the flag.
help: A help string.
lower_bound: float, min values of the flag.
upper_bound: float, max values of the flag.
flag_values: FlagValues object with which the flag will be registered.
**args: Dictionary with extra keyword args that are passed to the
Flag __init__. | [
"Registers",
"a",
"flag",
"whose",
"value",
"can",
"be",
"a",
"list",
"of",
"arbitrary",
"floats",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L775-L797 | train | 22,914 |
google/python-gflags | gflags/__init__.py | DEFINE_alias | def DEFINE_alias(name, original_name, flag_values=FLAGS, module_name=None): # pylint: disable=g-bad-name
"""Defines an alias flag for an existing one.
Args:
name: A string, name of the alias flag.
original_name: A string, name of the original flag.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the module that defines this flag.
Raises:
gflags.FlagError:
UnrecognizedFlagError: if the referenced flag doesn't exist.
DuplicateFlagError: if the alias name has been used by some existing flag.
"""
if original_name not in flag_values:
raise UnrecognizedFlagError(original_name)
flag = flag_values[original_name]
class _Parser(ArgumentParser):
"""The parser for the alias flag calls the original flag parser."""
def parse(self, argument):
flag.parse(argument)
return flag.value
class _FlagAlias(Flag):
"""Overrides Flag class so alias value is copy of original flag value."""
@property
def value(self):
return flag.value
@value.setter
def value(self, value):
flag.value = value
help_msg = 'Alias for --%s.' % flag.name
# If alias_name has been used, gflags.DuplicatedFlag will be raised.
DEFINE_flag(_FlagAlias(_Parser(), flag.serializer, name, flag.default,
help_msg, boolean=flag.boolean),
flag_values, module_name) | python | def DEFINE_alias(name, original_name, flag_values=FLAGS, module_name=None): # pylint: disable=g-bad-name
"""Defines an alias flag for an existing one.
Args:
name: A string, name of the alias flag.
original_name: A string, name of the original flag.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the module that defines this flag.
Raises:
gflags.FlagError:
UnrecognizedFlagError: if the referenced flag doesn't exist.
DuplicateFlagError: if the alias name has been used by some existing flag.
"""
if original_name not in flag_values:
raise UnrecognizedFlagError(original_name)
flag = flag_values[original_name]
class _Parser(ArgumentParser):
"""The parser for the alias flag calls the original flag parser."""
def parse(self, argument):
flag.parse(argument)
return flag.value
class _FlagAlias(Flag):
"""Overrides Flag class so alias value is copy of original flag value."""
@property
def value(self):
return flag.value
@value.setter
def value(self, value):
flag.value = value
help_msg = 'Alias for --%s.' % flag.name
# If alias_name has been used, gflags.DuplicatedFlag will be raised.
DEFINE_flag(_FlagAlias(_Parser(), flag.serializer, name, flag.default,
help_msg, boolean=flag.boolean),
flag_values, module_name) | [
"def",
"DEFINE_alias",
"(",
"name",
",",
"original_name",
",",
"flag_values",
"=",
"FLAGS",
",",
"module_name",
"=",
"None",
")",
":",
"# pylint: disable=g-bad-name",
"if",
"original_name",
"not",
"in",
"flag_values",
":",
"raise",
"UnrecognizedFlagError",
"(",
"o... | Defines an alias flag for an existing one.
Args:
name: A string, name of the alias flag.
original_name: A string, name of the original flag.
flag_values: FlagValues object with which the flag will be registered.
module_name: A string, the name of the module that defines this flag.
Raises:
gflags.FlagError:
UnrecognizedFlagError: if the referenced flag doesn't exist.
DuplicateFlagError: if the alias name has been used by some existing flag. | [
"Defines",
"an",
"alias",
"flag",
"for",
"an",
"existing",
"one",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/__init__.py#L825-L865 | train | 22,915 |
google/python-gflags | gflags/exceptions.py | DuplicateFlagError.from_flag | def from_flag(cls, flagname, flag_values, other_flag_values=None):
"""Create a DuplicateFlagError by providing flag name and values.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError.
"""
first_module = flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _helpers.GetCallingModule()
else:
second_module = other_flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
flag_summary = flag_values[flagname].help
msg = ("The flag '%s' is defined twice. First from %s, Second from %s. "
"Description from first occurrence: %s") % (
flagname, first_module, second_module, flag_summary)
return cls(msg) | python | def from_flag(cls, flagname, flag_values, other_flag_values=None):
"""Create a DuplicateFlagError by providing flag name and values.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError.
"""
first_module = flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _helpers.GetCallingModule()
else:
second_module = other_flag_values.FindModuleDefiningFlag(
flagname, default='<unknown>')
flag_summary = flag_values[flagname].help
msg = ("The flag '%s' is defined twice. First from %s, Second from %s. "
"Description from first occurrence: %s") % (
flagname, first_module, second_module, flag_summary)
return cls(msg) | [
"def",
"from_flag",
"(",
"cls",
",",
"flagname",
",",
"flag_values",
",",
"other_flag_values",
"=",
"None",
")",
":",
"first_module",
"=",
"flag_values",
".",
"FindModuleDefiningFlag",
"(",
"flagname",
",",
"default",
"=",
"'<unknown>'",
")",
"if",
"other_flag_v... | Create a DuplicateFlagError by providing flag name and values.
Args:
flagname: Name of the flag being redefined.
flag_values: FlagValues object containing the first definition of
flagname.
other_flag_values: If this argument is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError. | [
"Create",
"a",
"DuplicateFlagError",
"by",
"providing",
"flag",
"name",
"and",
"values",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/exceptions.py#L71-L98 | train | 22,916 |
google/python-gflags | gflags/argument_parser.py | BooleanParser.convert | def convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if isinstance(argument, str):
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument) | python | def convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if isinstance(argument, str):
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument) | [
"def",
"convert",
"(",
"self",
",",
"argument",
")",
":",
"if",
"isinstance",
"(",
"argument",
",",
"str",
")",
":",
"if",
"argument",
".",
"lower",
"(",
")",
"in",
"[",
"'true'",
",",
"'t'",
",",
"'1'",
"]",
":",
"return",
"True",
"elif",
"argumen... | Converts the argument to a boolean; raise ValueError on errors. | [
"Converts",
"the",
"argument",
"to",
"a",
"boolean",
";",
"raise",
"ValueError",
"on",
"errors",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/argument_parser.py#L270-L284 | train | 22,917 |
google/python-gflags | gflags/argument_parser.py | EnumParser.parse | def parse(self, argument):
"""Determine validity of argument and return the correct element of enum.
If self.enum_values is empty, then all arguments are valid and argument
will be returned.
Otherwise, if argument matches an element in enum, then the first
matching element will be returned.
Args:
argument: The supplied flag value.
Returns:
The matching element from enum_values, or argument if enum_values is
empty.
Raises:
ValueError: enum_values was non-empty, but argument didn't match
anything in enum.
"""
if not self.enum_values:
return argument
elif self.case_sensitive:
if argument not in self.enum_values:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return argument
else:
if argument.upper() not in [value.upper() for value in self.enum_values]:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return [value for value in self.enum_values
if value.upper() == argument.upper()][0] | python | def parse(self, argument):
"""Determine validity of argument and return the correct element of enum.
If self.enum_values is empty, then all arguments are valid and argument
will be returned.
Otherwise, if argument matches an element in enum, then the first
matching element will be returned.
Args:
argument: The supplied flag value.
Returns:
The matching element from enum_values, or argument if enum_values is
empty.
Raises:
ValueError: enum_values was non-empty, but argument didn't match
anything in enum.
"""
if not self.enum_values:
return argument
elif self.case_sensitive:
if argument not in self.enum_values:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return argument
else:
if argument.upper() not in [value.upper() for value in self.enum_values]:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return [value for value in self.enum_values
if value.upper() == argument.upper()][0] | [
"def",
"parse",
"(",
"self",
",",
"argument",
")",
":",
"if",
"not",
"self",
".",
"enum_values",
":",
"return",
"argument",
"elif",
"self",
".",
"case_sensitive",
":",
"if",
"argument",
"not",
"in",
"self",
".",
"enum_values",
":",
"raise",
"ValueError",
... | Determine validity of argument and return the correct element of enum.
If self.enum_values is empty, then all arguments are valid and argument
will be returned.
Otherwise, if argument matches an element in enum, then the first
matching element will be returned.
Args:
argument: The supplied flag value.
Returns:
The matching element from enum_values, or argument if enum_values is
empty.
Raises:
ValueError: enum_values was non-empty, but argument didn't match
anything in enum. | [
"Determine",
"validity",
"of",
"argument",
"and",
"return",
"the",
"correct",
"element",
"of",
"enum",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/argument_parser.py#L311-L345 | train | 22,918 |
google/python-gflags | gflags/argument_parser.py | CsvListSerializer.serialize | def serialize(self, value):
"""Serialize a list as a string, if possible, or as a unicode string."""
if six.PY2:
# In Python2 csv.writer doesn't accept unicode, so we convert to UTF-8.
output = io.BytesIO()
csv.writer(output).writerow([unicode(x).encode('utf-8') for x in value])
serialized_value = output.getvalue().decode('utf-8').strip()
else:
# In Python3 csv.writer expects a text stream.
output = io.StringIO()
csv.writer(output).writerow([str(x) for x in value])
serialized_value = output.getvalue().strip()
# We need the returned value to be pure ascii or Unicodes so that
# when the xml help is generated they are usefully encodable.
return _helpers.StrOrUnicode(serialized_value) | python | def serialize(self, value):
"""Serialize a list as a string, if possible, or as a unicode string."""
if six.PY2:
# In Python2 csv.writer doesn't accept unicode, so we convert to UTF-8.
output = io.BytesIO()
csv.writer(output).writerow([unicode(x).encode('utf-8') for x in value])
serialized_value = output.getvalue().decode('utf-8').strip()
else:
# In Python3 csv.writer expects a text stream.
output = io.StringIO()
csv.writer(output).writerow([str(x) for x in value])
serialized_value = output.getvalue().strip()
# We need the returned value to be pure ascii or Unicodes so that
# when the xml help is generated they are usefully encodable.
return _helpers.StrOrUnicode(serialized_value) | [
"def",
"serialize",
"(",
"self",
",",
"value",
")",
":",
"if",
"six",
".",
"PY2",
":",
"# In Python2 csv.writer doesn't accept unicode, so we convert to UTF-8.",
"output",
"=",
"io",
".",
"BytesIO",
"(",
")",
"csv",
".",
"writer",
"(",
"output",
")",
".",
"wri... | Serialize a list as a string, if possible, or as a unicode string. | [
"Serialize",
"a",
"list",
"as",
"a",
"string",
"if",
"possible",
"or",
"as",
"a",
"unicode",
"string",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/argument_parser.py#L365-L380 | train | 22,919 |
shichao-an/soundmeter | soundmeter/meter.py | Meter.record | def record(self):
"""
Record PyAudio stream into StringIO output
This coroutine keeps stream open; the stream is closed in stop()
"""
while True:
frames = []
self.stream.start_stream()
for i in range(self.num_frames):
data = self.stream.read(self.config.FRAMES_PER_BUFFER)
frames.append(data)
self.output.seek(0)
w = wave.open(self.output, 'wb')
w.setnchannels(self.config.CHANNELS)
w.setsampwidth(self.audio.get_sample_size(self.config.FORMAT))
w.setframerate(self.config.RATE)
w.writeframes(b''.join(frames))
w.close()
yield | python | def record(self):
"""
Record PyAudio stream into StringIO output
This coroutine keeps stream open; the stream is closed in stop()
"""
while True:
frames = []
self.stream.start_stream()
for i in range(self.num_frames):
data = self.stream.read(self.config.FRAMES_PER_BUFFER)
frames.append(data)
self.output.seek(0)
w = wave.open(self.output, 'wb')
w.setnchannels(self.config.CHANNELS)
w.setsampwidth(self.audio.get_sample_size(self.config.FORMAT))
w.setframerate(self.config.RATE)
w.writeframes(b''.join(frames))
w.close()
yield | [
"def",
"record",
"(",
"self",
")",
":",
"while",
"True",
":",
"frames",
"=",
"[",
"]",
"self",
".",
"stream",
".",
"start_stream",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"num_frames",
")",
":",
"data",
"=",
"self",
".",
"stream",
... | Record PyAudio stream into StringIO output
This coroutine keeps stream open; the stream is closed in stop() | [
"Record",
"PyAudio",
"stream",
"into",
"StringIO",
"output"
] | 89222cd45e6ac24da32a1197d6b4be891d63267d | https://github.com/shichao-an/soundmeter/blob/89222cd45e6ac24da32a1197d6b4be891d63267d/soundmeter/meter.py#L84-L104 | train | 22,920 |
shichao-an/soundmeter | soundmeter/meter.py | Meter.stop | def stop(self):
"""Stop the stream and terminate PyAudio"""
self.prestop()
if not self._graceful:
self._graceful = True
self.stream.stop_stream()
self.audio.terminate()
msg = 'Stopped'
self.verbose_info(msg, log=False)
# Log 'Stopped' anyway
if self.log:
self.logging.info(msg)
if self.collect:
if self._data:
print('Collected result:')
print(' min: %10d' % self._data['min'])
print(' max: %10d' % self._data['max'])
print(' avg: %10d' % int(self._data['avg']))
self.poststop() | python | def stop(self):
"""Stop the stream and terminate PyAudio"""
self.prestop()
if not self._graceful:
self._graceful = True
self.stream.stop_stream()
self.audio.terminate()
msg = 'Stopped'
self.verbose_info(msg, log=False)
# Log 'Stopped' anyway
if self.log:
self.logging.info(msg)
if self.collect:
if self._data:
print('Collected result:')
print(' min: %10d' % self._data['min'])
print(' max: %10d' % self._data['max'])
print(' avg: %10d' % int(self._data['avg']))
self.poststop() | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"prestop",
"(",
")",
"if",
"not",
"self",
".",
"_graceful",
":",
"self",
".",
"_graceful",
"=",
"True",
"self",
".",
"stream",
".",
"stop_stream",
"(",
")",
"self",
".",
"audio",
".",
"terminate",
... | Stop the stream and terminate PyAudio | [
"Stop",
"the",
"stream",
"and",
"terminate",
"PyAudio"
] | 89222cd45e6ac24da32a1197d6b4be891d63267d | https://github.com/shichao-an/soundmeter/blob/89222cd45e6ac24da32a1197d6b4be891d63267d/soundmeter/meter.py#L161-L179 | train | 22,921 |
shichao-an/soundmeter | soundmeter/meter.py | Meter.get_threshold | def get_threshold(self):
"""Get and validate raw RMS value from threshold"""
if self.threshold.startswith('+'):
if self.threshold[1:].isdigit():
self._threshold = int(self.threshold[1:])
self._upper = True
elif self.threshold.startswith('-'):
if self.threshold[1:].isdigit():
self._threshold = int(self.threshold[1:])
self._upper = False
else:
if self.threshold.isdigit():
self._threshold = int(self.threshold)
self._upper = True
if not hasattr(self, '_threshold'):
raise ValueError('Invalid threshold') | python | def get_threshold(self):
"""Get and validate raw RMS value from threshold"""
if self.threshold.startswith('+'):
if self.threshold[1:].isdigit():
self._threshold = int(self.threshold[1:])
self._upper = True
elif self.threshold.startswith('-'):
if self.threshold[1:].isdigit():
self._threshold = int(self.threshold[1:])
self._upper = False
else:
if self.threshold.isdigit():
self._threshold = int(self.threshold)
self._upper = True
if not hasattr(self, '_threshold'):
raise ValueError('Invalid threshold') | [
"def",
"get_threshold",
"(",
"self",
")",
":",
"if",
"self",
".",
"threshold",
".",
"startswith",
"(",
"'+'",
")",
":",
"if",
"self",
".",
"threshold",
"[",
"1",
":",
"]",
".",
"isdigit",
"(",
")",
":",
"self",
".",
"_threshold",
"=",
"int",
"(",
... | Get and validate raw RMS value from threshold | [
"Get",
"and",
"validate",
"raw",
"RMS",
"value",
"from",
"threshold"
] | 89222cd45e6ac24da32a1197d6b4be891d63267d | https://github.com/shichao-an/soundmeter/blob/89222cd45e6ac24da32a1197d6b4be891d63267d/soundmeter/meter.py#L181-L197 | train | 22,922 |
shichao-an/soundmeter | soundmeter/meter.py | Meter.collect_rms | def collect_rms(self, rms):
"""Collect and calculate min, max and average RMS values"""
if self._data:
self._data['min'] = min(rms, self._data['min'])
self._data['max'] = max(rms, self._data['max'])
self._data['avg'] = float(rms + self._data['avg']) / 2
else:
self._data['min'] = rms
self._data['max'] = rms
self._data['avg'] = rms | python | def collect_rms(self, rms):
"""Collect and calculate min, max and average RMS values"""
if self._data:
self._data['min'] = min(rms, self._data['min'])
self._data['max'] = max(rms, self._data['max'])
self._data['avg'] = float(rms + self._data['avg']) / 2
else:
self._data['min'] = rms
self._data['max'] = rms
self._data['avg'] = rms | [
"def",
"collect_rms",
"(",
"self",
",",
"rms",
")",
":",
"if",
"self",
".",
"_data",
":",
"self",
".",
"_data",
"[",
"'min'",
"]",
"=",
"min",
"(",
"rms",
",",
"self",
".",
"_data",
"[",
"'min'",
"]",
")",
"self",
".",
"_data",
"[",
"'max'",
"]... | Collect and calculate min, max and average RMS values | [
"Collect",
"and",
"calculate",
"min",
"max",
"and",
"average",
"RMS",
"values"
] | 89222cd45e6ac24da32a1197d6b4be891d63267d | https://github.com/shichao-an/soundmeter/blob/89222cd45e6ac24da32a1197d6b4be891d63267d/soundmeter/meter.py#L258-L267 | train | 22,923 |
chainside/btcpy | btcpy/structs/transaction.py | TimeBasedSequence.from_timedelta | def from_timedelta(cls, timedelta):
"""expects a datetime.timedelta object"""
from math import ceil
units = ceil(timedelta.total_seconds() / cls.time_unit)
return cls.create(units) | python | def from_timedelta(cls, timedelta):
"""expects a datetime.timedelta object"""
from math import ceil
units = ceil(timedelta.total_seconds() / cls.time_unit)
return cls.create(units) | [
"def",
"from_timedelta",
"(",
"cls",
",",
"timedelta",
")",
":",
"from",
"math",
"import",
"ceil",
"units",
"=",
"ceil",
"(",
"timedelta",
".",
"total_seconds",
"(",
")",
"/",
"cls",
".",
"time_unit",
")",
"return",
"cls",
".",
"create",
"(",
"units",
... | expects a datetime.timedelta object | [
"expects",
"a",
"datetime",
".",
"timedelta",
"object"
] | 8e75c630dacf0f997ed0e0e8739bed428a95d7b1 | https://github.com/chainside/btcpy/blob/8e75c630dacf0f997ed0e0e8739bed428a95d7b1/btcpy/structs/transaction.py#L126-L130 | train | 22,924 |
chainside/btcpy | btcpy/lib/base58.py | b58decode_check | def b58decode_check(v: str) -> bytes:
'''Decode and verify the checksum of a Base58 encoded string'''
result = b58decode(v)
result, check = result[:-4], result[-4:]
digest = sha256(sha256(result).digest()).digest()
if check != digest[:4]:
raise ValueError("Invalid checksum")
return result | python | def b58decode_check(v: str) -> bytes:
'''Decode and verify the checksum of a Base58 encoded string'''
result = b58decode(v)
result, check = result[:-4], result[-4:]
digest = sha256(sha256(result).digest()).digest()
if check != digest[:4]:
raise ValueError("Invalid checksum")
return result | [
"def",
"b58decode_check",
"(",
"v",
":",
"str",
")",
"->",
"bytes",
":",
"result",
"=",
"b58decode",
"(",
"v",
")",
"result",
",",
"check",
"=",
"result",
"[",
":",
"-",
"4",
"]",
",",
"result",
"[",
"-",
"4",
":",
"]",
"digest",
"=",
"sha256",
... | Decode and verify the checksum of a Base58 encoded string | [
"Decode",
"and",
"verify",
"the",
"checksum",
"of",
"a",
"Base58",
"encoded",
"string"
] | 8e75c630dacf0f997ed0e0e8739bed428a95d7b1 | https://github.com/chainside/btcpy/blob/8e75c630dacf0f997ed0e0e8739bed428a95d7b1/btcpy/lib/base58.py#L64-L74 | train | 22,925 |
chainside/btcpy | btcpy/lib/bech32.py | bech32_decode | def bech32_decode(bech):
"""Validate a Bech32 string, and determine HRP and data."""
if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
(bech.lower() != bech and bech.upper() != bech)):
return None, None
bech = bech.lower()
pos = bech.rfind('1')
if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
return None, None
if not all(x in CHARSET for x in bech[pos+1:]):
return None, None
hrp = bech[:pos]
data = [CHARSET.find(x) for x in bech[pos+1:]]
if not bech32_verify_checksum(hrp, data):
return None, None
return hrp, data[:-6] | python | def bech32_decode(bech):
"""Validate a Bech32 string, and determine HRP and data."""
if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
(bech.lower() != bech and bech.upper() != bech)):
return None, None
bech = bech.lower()
pos = bech.rfind('1')
if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
return None, None
if not all(x in CHARSET for x in bech[pos+1:]):
return None, None
hrp = bech[:pos]
data = [CHARSET.find(x) for x in bech[pos+1:]]
if not bech32_verify_checksum(hrp, data):
return None, None
return hrp, data[:-6] | [
"def",
"bech32_decode",
"(",
"bech",
")",
":",
"if",
"(",
"(",
"any",
"(",
"ord",
"(",
"x",
")",
"<",
"33",
"or",
"ord",
"(",
"x",
")",
">",
"126",
"for",
"x",
"in",
"bech",
")",
")",
"or",
"(",
"bech",
".",
"lower",
"(",
")",
"!=",
"bech",... | Validate a Bech32 string, and determine HRP and data. | [
"Validate",
"a",
"Bech32",
"string",
"and",
"determine",
"HRP",
"and",
"data",
"."
] | 8e75c630dacf0f997ed0e0e8739bed428a95d7b1 | https://github.com/chainside/btcpy/blob/8e75c630dacf0f997ed0e0e8739bed428a95d7b1/btcpy/lib/bech32.py#L62-L77 | train | 22,926 |
yourlabs/django-session-security | session_security/middleware.py | SessionSecurityMiddleware.process_request | def process_request(self, request):
""" Update last activity time or logout. """
if django.VERSION < (1, 10):
is_authenticated = request.user.is_authenticated()
else:
is_authenticated = request.user.is_authenticated
if not is_authenticated:
return
now = datetime.now()
if '_session_security' not in request.session:
set_last_activity(request.session, now)
return
delta = now - get_last_activity(request.session)
expire_seconds = self.get_expire_seconds(request)
if delta >= timedelta(seconds=expire_seconds):
logout(request)
elif (request.path == reverse('session_security_ping') and
'idleFor' in request.GET):
self.update_last_activity(request, now)
elif not self.is_passive_request(request):
set_last_activity(request.session, now) | python | def process_request(self, request):
""" Update last activity time or logout. """
if django.VERSION < (1, 10):
is_authenticated = request.user.is_authenticated()
else:
is_authenticated = request.user.is_authenticated
if not is_authenticated:
return
now = datetime.now()
if '_session_security' not in request.session:
set_last_activity(request.session, now)
return
delta = now - get_last_activity(request.session)
expire_seconds = self.get_expire_seconds(request)
if delta >= timedelta(seconds=expire_seconds):
logout(request)
elif (request.path == reverse('session_security_ping') and
'idleFor' in request.GET):
self.update_last_activity(request, now)
elif not self.is_passive_request(request):
set_last_activity(request.session, now) | [
"def",
"process_request",
"(",
"self",
",",
"request",
")",
":",
"if",
"django",
".",
"VERSION",
"<",
"(",
"1",
",",
"10",
")",
":",
"is_authenticated",
"=",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
"else",
":",
"is_authenticated",
"=",... | Update last activity time or logout. | [
"Update",
"last",
"activity",
"time",
"or",
"logout",
"."
] | 5845c55f1c4b8cef8362302d64b396fc705e1a10 | https://github.com/yourlabs/django-session-security/blob/5845c55f1c4b8cef8362302d64b396fc705e1a10/session_security/middleware.py#L56-L80 | train | 22,927 |
yourlabs/django-session-security | session_security/utils.py | get_last_activity | def get_last_activity(session):
"""
Get the last activity datetime string from the session and return the
python datetime object.
"""
try:
return datetime.strptime(session['_session_security'],
'%Y-%m-%dT%H:%M:%S.%f')
except AttributeError:
#################################################################
# * this is an odd bug in python
# bug report: http://bugs.python.org/issue7980
# bug explained here:
# http://code-trick.com/python-bug-attribute-error-_strptime/
# * sometimes, in multithreaded enviroments, we get AttributeError
# in this case, we just return datetime.now(),
# so that we are not logged out
# "./session_security/middleware.py", in update_last_activity
# last_activity = get_last_activity(request.session)
# "./session_security/utils.py", in get_last_activity
# '%Y-%m-%dT%H:%M:%S.%f')
# AttributeError: _strptime
#
#################################################################
return datetime.now()
except TypeError:
return datetime.now() | python | def get_last_activity(session):
"""
Get the last activity datetime string from the session and return the
python datetime object.
"""
try:
return datetime.strptime(session['_session_security'],
'%Y-%m-%dT%H:%M:%S.%f')
except AttributeError:
#################################################################
# * this is an odd bug in python
# bug report: http://bugs.python.org/issue7980
# bug explained here:
# http://code-trick.com/python-bug-attribute-error-_strptime/
# * sometimes, in multithreaded enviroments, we get AttributeError
# in this case, we just return datetime.now(),
# so that we are not logged out
# "./session_security/middleware.py", in update_last_activity
# last_activity = get_last_activity(request.session)
# "./session_security/utils.py", in get_last_activity
# '%Y-%m-%dT%H:%M:%S.%f')
# AttributeError: _strptime
#
#################################################################
return datetime.now()
except TypeError:
return datetime.now() | [
"def",
"get_last_activity",
"(",
"session",
")",
":",
"try",
":",
"return",
"datetime",
".",
"strptime",
"(",
"session",
"[",
"'_session_security'",
"]",
",",
"'%Y-%m-%dT%H:%M:%S.%f'",
")",
"except",
"AttributeError",
":",
"##############################################... | Get the last activity datetime string from the session and return the
python datetime object. | [
"Get",
"the",
"last",
"activity",
"datetime",
"string",
"from",
"the",
"session",
"and",
"return",
"the",
"python",
"datetime",
"object",
"."
] | 5845c55f1c4b8cef8362302d64b396fc705e1a10 | https://github.com/yourlabs/django-session-security/blob/5845c55f1c4b8cef8362302d64b396fc705e1a10/session_security/utils.py#L11-L38 | train | 22,928 |
pythongssapi/python-gssapi | gssapi/creds.py | Credentials.name | def name(self):
"""Get the name associated with these credentials"""
return self.inquire(name=True, lifetime=False,
usage=False, mechs=False).name | python | def name(self):
"""Get the name associated with these credentials"""
return self.inquire(name=True, lifetime=False,
usage=False, mechs=False).name | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"inquire",
"(",
"name",
"=",
"True",
",",
"lifetime",
"=",
"False",
",",
"usage",
"=",
"False",
",",
"mechs",
"=",
"False",
")",
".",
"name"
] | Get the name associated with these credentials | [
"Get",
"the",
"name",
"associated",
"with",
"these",
"credentials"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L70-L73 | train | 22,929 |
pythongssapi/python-gssapi | gssapi/creds.py | Credentials.acquire | def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
store=None):
"""Acquire GSSAPI credentials
This method acquires credentials. If the `store` argument is
used, the credentials will be acquired from the given
credential store (if supported). Otherwise, the credentials are
acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Using a non-default store requires support for the credentials store
extension.
Args:
name (Name): the name associated with the credentials,
or None for the default name
lifetime (int): the desired lifetime of the credentials, or None
for indefinite
mechs (list): the desired :class:`MechType` OIDs to be used
with the credentials, or None for the default set
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
AcquireCredResult: the acquired credentials and information about
them
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is None:
res = rcreds.acquire_cred(name, lifetime,
mechs, usage)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.acquire_cred_from(store, name,
lifetime, mechs,
usage)
return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
res.lifetime) | python | def acquire(cls, name=None, lifetime=None, mechs=None, usage='both',
store=None):
"""Acquire GSSAPI credentials
This method acquires credentials. If the `store` argument is
used, the credentials will be acquired from the given
credential store (if supported). Otherwise, the credentials are
acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Using a non-default store requires support for the credentials store
extension.
Args:
name (Name): the name associated with the credentials,
or None for the default name
lifetime (int): the desired lifetime of the credentials, or None
for indefinite
mechs (list): the desired :class:`MechType` OIDs to be used
with the credentials, or None for the default set
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
AcquireCredResult: the acquired credentials and information about
them
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is None:
res = rcreds.acquire_cred(name, lifetime,
mechs, usage)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.acquire_cred_from(store, name,
lifetime, mechs,
usage)
return tuples.AcquireCredResult(cls(base=res.creds), res.mechs,
res.lifetime) | [
"def",
"acquire",
"(",
"cls",
",",
"name",
"=",
"None",
",",
"lifetime",
"=",
"None",
",",
"mechs",
"=",
"None",
",",
"usage",
"=",
"'both'",
",",
"store",
"=",
"None",
")",
":",
"if",
"store",
"is",
"None",
":",
"res",
"=",
"rcreds",
".",
"acqui... | Acquire GSSAPI credentials
This method acquires credentials. If the `store` argument is
used, the credentials will be acquired from the given
credential store (if supported). Otherwise, the credentials are
acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Using a non-default store requires support for the credentials store
extension.
Args:
name (Name): the name associated with the credentials,
or None for the default name
lifetime (int): the desired lifetime of the credentials, or None
for indefinite
mechs (list): the desired :class:`MechType` OIDs to be used
with the credentials, or None for the default set
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
AcquireCredResult: the acquired credentials and information about
them
Raises:
BadMechanismError
BadNameTypeError
BadNameError
ExpiredCredentialsError
MissingCredentialsError | [
"Acquire",
"GSSAPI",
"credentials"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L94-L151 | train | 22,930 |
pythongssapi/python-gssapi | gssapi/creds.py | Credentials.store | def store(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False):
"""Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
"""
if store is None:
if rcred_rfc5588 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for RFC 5588")
return rcred_rfc5588.store_cred(self, usage, mech,
overwrite, set_default)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores directly")
store = _encode_dict(store)
return rcred_cred_store.store_cred_into(store, self, usage, mech,
overwrite, set_default) | python | def store(self, store=None, usage='both', mech=None,
overwrite=False, set_default=False):
"""Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError
"""
if store is None:
if rcred_rfc5588 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for RFC 5588")
return rcred_rfc5588.store_cred(self, usage, mech,
overwrite, set_default)
else:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores directly")
store = _encode_dict(store)
return rcred_cred_store.store_cred_into(store, self, usage, mech,
overwrite, set_default) | [
"def",
"store",
"(",
"self",
",",
"store",
"=",
"None",
",",
"usage",
"=",
"'both'",
",",
"mech",
"=",
"None",
",",
"overwrite",
"=",
"False",
",",
"set_default",
"=",
"False",
")",
":",
"if",
"store",
"is",
"None",
":",
"if",
"rcred_rfc5588",
"is",
... | Store these credentials into the given store
This method stores the current credentials into the specified
credentials store. If the default store is used, support for
:rfc:`5588` is required. Otherwise, support for the credentials
store extension is required.
:requires-ext:`rfc5588` or :requires-ext:`cred_store`
Args:
store (dict): the store into which to store the credentials,
or None for the default store.
usage (str): the usage to store the credentials with -- either
'both', 'initiate', or 'accept'
mech (OID): the :class:`MechType` to associate with the
stored credentials
overwrite (bool): whether or not to overwrite existing credentials
stored with the same name, etc
set_default (bool): whether or not to set these credentials as
the default credentials for the given store.
Returns:
StoreCredResult: the results of the credential storing operation
Raises:
GSSError
ExpiredCredentialsError
MissingCredentialsError
OperationUnavailableError
DuplicateCredentialsElementError | [
"Store",
"these",
"credentials",
"into",
"the",
"given",
"store"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L153-L203 | train | 22,931 |
pythongssapi/python-gssapi | gssapi/creds.py | Credentials.impersonate | def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds) | python | def impersonate(self, name=None, lifetime=None, mechs=None,
usage='initiate'):
"""Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name
"""
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for S4U")
res = rcred_s4u.acquire_cred_impersonate_name(self, name,
lifetime, mechs,
usage)
return type(self)(base=res.creds) | [
"def",
"impersonate",
"(",
"self",
",",
"name",
"=",
"None",
",",
"lifetime",
"=",
"None",
",",
"mechs",
"=",
"None",
",",
"usage",
"=",
"'initiate'",
")",
":",
"if",
"rcred_s4u",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Your GSSAPI imple... | Impersonate a name using the current credentials
This method acquires credentials by impersonating another
name using the current credentials.
:requires-ext:`s4u`
Args:
name (Name): the name to impersonate
lifetime (int): the desired lifetime of the new credentials,
or None for indefinite
mechs (list): the desired :class:`MechType` OIDs for the new
credentials
usage (str): the desired usage for the new credentials -- either
'both', 'initiate', or 'accept'. Note that some mechanisms
may only support 'initiate'.
Returns:
Credentials: the new credentials impersonating the given name | [
"Impersonate",
"a",
"name",
"using",
"the",
"current",
"credentials"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L205-L236 | train | 22,932 |
pythongssapi/python-gssapi | gssapi/creds.py | Credentials.inquire | def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
"""Inspect these credentials for information
This method inspects these credentials for information about them.
Args:
name (bool): get the name associated with the credentials
lifetime (bool): get the remaining lifetime for the credentials
usage (bool): get the usage for the credentials
mechs (bool): get the mechanisms associated with the credentials
Returns:
InquireCredResult: the information about the credentials,
with None used when the corresponding argument was False
Raises:
MissingCredentialsError
InvalidCredentialsError
ExpiredCredentialsError
"""
res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredResult(res_name, res.lifetime,
res.usage, res.mechs) | python | def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
"""Inspect these credentials for information
This method inspects these credentials for information about them.
Args:
name (bool): get the name associated with the credentials
lifetime (bool): get the remaining lifetime for the credentials
usage (bool): get the usage for the credentials
mechs (bool): get the mechanisms associated with the credentials
Returns:
InquireCredResult: the information about the credentials,
with None used when the corresponding argument was False
Raises:
MissingCredentialsError
InvalidCredentialsError
ExpiredCredentialsError
"""
res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredResult(res_name, res.lifetime,
res.usage, res.mechs) | [
"def",
"inquire",
"(",
"self",
",",
"name",
"=",
"True",
",",
"lifetime",
"=",
"True",
",",
"usage",
"=",
"True",
",",
"mechs",
"=",
"True",
")",
":",
"res",
"=",
"rcreds",
".",
"inquire_cred",
"(",
"self",
",",
"name",
",",
"lifetime",
",",
"usage... | Inspect these credentials for information
This method inspects these credentials for information about them.
Args:
name (bool): get the name associated with the credentials
lifetime (bool): get the remaining lifetime for the credentials
usage (bool): get the usage for the credentials
mechs (bool): get the mechanisms associated with the credentials
Returns:
InquireCredResult: the information about the credentials,
with None used when the corresponding argument was False
Raises:
MissingCredentialsError
InvalidCredentialsError
ExpiredCredentialsError | [
"Inspect",
"these",
"credentials",
"for",
"information"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L238-L267 | train | 22,933 |
pythongssapi/python-gssapi | gssapi/creds.py | Credentials.inquire_by_mech | def inquire_by_mech(self, mech, name=True, init_lifetime=True,
accept_lifetime=True, usage=True):
"""Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrive the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
"""
res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
accept_lifetime, usage)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredByMechResult(res_name,
res.init_lifetime,
res.accept_lifetime,
res.usage) | python | def inquire_by_mech(self, mech, name=True, init_lifetime=True,
accept_lifetime=True, usage=True):
"""Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrive the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False
"""
res = rcreds.inquire_cred_by_mech(self, mech, name, init_lifetime,
accept_lifetime, usage)
if res.name is not None:
res_name = names.Name(res.name)
else:
res_name = None
return tuples.InquireCredByMechResult(res_name,
res.init_lifetime,
res.accept_lifetime,
res.usage) | [
"def",
"inquire_by_mech",
"(",
"self",
",",
"mech",
",",
"name",
"=",
"True",
",",
"init_lifetime",
"=",
"True",
",",
"accept_lifetime",
"=",
"True",
",",
"usage",
"=",
"True",
")",
":",
"res",
"=",
"rcreds",
".",
"inquire_cred_by_mech",
"(",
"self",
","... | Inspect these credentials for per-mechanism information
This method inspects these credentials for per-mechanism information
about them.
Args:
mech (OID): the mechanism for which to retrive the information
name (bool): get the name associated with the credentials
init_lifetime (bool): get the remaining initiate lifetime for
the credentials
accept_lifetime (bool): get the remaining accept lifetime for
the credentials
usage (bool): get the usage for the credentials
Returns:
InquireCredByMechResult: the information about the credentials,
with None used when the corresponding argument was False | [
"Inspect",
"these",
"credentials",
"for",
"per",
"-",
"mechanism",
"information"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L269-L301 | train | 22,934 |
pythongssapi/python-gssapi | gssapi/creds.py | Credentials.add | def add(self, name, mech, usage='both',
init_lifetime=None, accept_lifetime=None, impersonator=None,
store=None):
"""Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is not None and impersonator is not None:
raise ValueError('You cannot use both the `impersonator` and '
'`store` arguments at the same time')
if store is not None:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.add_cred_from(store, self, name, mech,
usage, init_lifetime,
accept_lifetime)
elif impersonator is not None:
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for S4U")
res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
name, mech, usage,
init_lifetime,
accept_lifetime)
else:
res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
accept_lifetime)
return Credentials(res.creds) | python | def add(self, name, mech, usage='both',
init_lifetime=None, accept_lifetime=None, impersonator=None,
store=None):
"""Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError
"""
if store is not None and impersonator is not None:
raise ValueError('You cannot use both the `impersonator` and '
'`store` arguments at the same time')
if store is not None:
if rcred_cred_store is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for manipulating "
"credential stores")
store = _encode_dict(store)
res = rcred_cred_store.add_cred_from(store, self, name, mech,
usage, init_lifetime,
accept_lifetime)
elif impersonator is not None:
if rcred_s4u is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not have support for S4U")
res = rcred_s4u.add_cred_impersonate_name(self, impersonator,
name, mech, usage,
init_lifetime,
accept_lifetime)
else:
res = rcreds.add_cred(self, name, mech, usage, init_lifetime,
accept_lifetime)
return Credentials(res.creds) | [
"def",
"add",
"(",
"self",
",",
"name",
",",
"mech",
",",
"usage",
"=",
"'both'",
",",
"init_lifetime",
"=",
"None",
",",
"accept_lifetime",
"=",
"None",
",",
"impersonator",
"=",
"None",
",",
"store",
"=",
"None",
")",
":",
"if",
"store",
"is",
"not... | Acquire more credentials to add to the current set
This method works like :meth:`acquire`, except that it adds the
acquired credentials for a single mechanism to a copy of the current
set, instead of creating a new set for multiple mechanisms.
Unlike :meth:`acquire`, you cannot pass None desired name or
mechanism.
If the `impersonator` argument is used, the credentials will
impersonate the given name using the impersonator credentials
(:requires-ext:`s4u`).
If the `store` argument is used, the credentials will be acquired
from the given credential store (:requires-ext:`cred_store`).
Otherwise, the credentials are acquired from the default store.
The credential store information is a dictionary containing
mechanisms-specific keys and values pointing to a credential store
or stores.
Note that the `store` argument is not compatible with the
`impersonator` argument.
Args:
name (Name): the name associated with the
credentials
mech (OID): the desired :class:`MechType` to be used with the
credentials
usage (str): the usage for the credentials -- either 'both',
'initiate', or 'accept'
init_lifetime (int): the desired initiate lifetime of the
credentials, or None for indefinite
accept_lifetime (int): the desired accept lifetime of the
credentials, or None for indefinite
impersonator (Credentials): the credentials to use to impersonate
the given name, or None to not acquire normally
(:requires-ext:`s4u`)
store (dict): the credential store information pointing to the
credential store from which to acquire the credentials,
or None for the default store (:requires-ext:`cred_store`)
Returns:
Credentials: the credentials set containing the current credentials
and the newly acquired ones.
Raises:
BadMechanismError
BadNameTypeError
BadNameError
DuplicateCredentialsElementError
ExpiredCredentialsError
MissingCredentialsError | [
"Acquire",
"more",
"credentials",
"to",
"add",
"to",
"the",
"current",
"set"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/creds.py#L303-L386 | train | 22,935 |
pythongssapi/python-gssapi | gssapi/names.py | Name.display_as | def display_as(self, name_type):
"""
Display this name as the given name type.
This method attempts to display the current :class:`Name`
using the syntax of the given :class:`NameType`, if possible.
Warning:
In MIT krb5 versions below 1.13.3, this method can segfault if
the name was not *originally* created with a `name_type` that was
not ``None`` (even in cases when a ``name_type``
is later "added", such as via :meth:`canonicalize`).
**Do not use this method unless you are sure the above
conditions can never happen in your code.**
Warning:
In addition to the above warning, current versions of MIT krb5 do
not actually fully implement this method, and it may return
incorrect results in the case of canonicalized names.
:requires-ext:`rfc6680`
Args:
name_type (OID): the :class:`NameType` to use to display the given
name
Returns:
str: the displayed name
Raises:
OperationUnavailableError
"""
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
return rname_rfc6680.display_name_ext(self, name_type).decode(
_utils._get_encoding()) | python | def display_as(self, name_type):
"""
Display this name as the given name type.
This method attempts to display the current :class:`Name`
using the syntax of the given :class:`NameType`, if possible.
Warning:
In MIT krb5 versions below 1.13.3, this method can segfault if
the name was not *originally* created with a `name_type` that was
not ``None`` (even in cases when a ``name_type``
is later "added", such as via :meth:`canonicalize`).
**Do not use this method unless you are sure the above
conditions can never happen in your code.**
Warning:
In addition to the above warning, current versions of MIT krb5 do
not actually fully implement this method, and it may return
incorrect results in the case of canonicalized names.
:requires-ext:`rfc6680`
Args:
name_type (OID): the :class:`NameType` to use to display the given
name
Returns:
str: the displayed name
Raises:
OperationUnavailableError
"""
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
return rname_rfc6680.display_name_ext(self, name_type).decode(
_utils._get_encoding()) | [
"def",
"display_as",
"(",
"self",
",",
"name_type",
")",
":",
"if",
"rname_rfc6680",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Your GSSAPI implementation does not \"",
"\"support RFC 6680 (the GSSAPI naming \"",
"\"extensions)\"",
")",
"return",
"rname_rfc6... | Display this name as the given name type.
This method attempts to display the current :class:`Name`
using the syntax of the given :class:`NameType`, if possible.
Warning:
In MIT krb5 versions below 1.13.3, this method can segfault if
the name was not *originally* created with a `name_type` that was
not ``None`` (even in cases when a ``name_type``
is later "added", such as via :meth:`canonicalize`).
**Do not use this method unless you are sure the above
conditions can never happen in your code.**
Warning:
In addition to the above warning, current versions of MIT krb5 do
not actually fully implement this method, and it may return
incorrect results in the case of canonicalized names.
:requires-ext:`rfc6680`
Args:
name_type (OID): the :class:`NameType` to use to display the given
name
Returns:
str: the displayed name
Raises:
OperationUnavailableError | [
"Display",
"this",
"name",
"as",
"the",
"given",
"name",
"type",
"."
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/names.py#L125-L165 | train | 22,936 |
pythongssapi/python-gssapi | gssapi/names.py | Name.export | def export(self, composite=False):
"""Export this name as a token.
This method exports the name into a byte string which can then be
imported by using the `token` argument of the constructor.
Args:
composite (bool): whether or not use to a composite token --
:requires-ext:`rfc6680`
Returns:
bytes: the exported name in token form
Raises:
MechanismNameRequiredError
BadNameTypeError
BadNameError
"""
if composite:
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not support RFC 6680 (the GSSAPI "
"naming extensions)")
return rname_rfc6680.export_name_composite(self)
else:
return rname.export_name(self) | python | def export(self, composite=False):
"""Export this name as a token.
This method exports the name into a byte string which can then be
imported by using the `token` argument of the constructor.
Args:
composite (bool): whether or not use to a composite token --
:requires-ext:`rfc6680`
Returns:
bytes: the exported name in token form
Raises:
MechanismNameRequiredError
BadNameTypeError
BadNameError
"""
if composite:
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does "
"not support RFC 6680 (the GSSAPI "
"naming extensions)")
return rname_rfc6680.export_name_composite(self)
else:
return rname.export_name(self) | [
"def",
"export",
"(",
"self",
",",
"composite",
"=",
"False",
")",
":",
"if",
"composite",
":",
"if",
"rname_rfc6680",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Your GSSAPI implementation does \"",
"\"not support RFC 6680 (the GSSAPI \"",
"\"naming exte... | Export this name as a token.
This method exports the name into a byte string which can then be
imported by using the `token` argument of the constructor.
Args:
composite (bool): whether or not use to a composite token --
:requires-ext:`rfc6680`
Returns:
bytes: the exported name in token form
Raises:
MechanismNameRequiredError
BadNameTypeError
BadNameError | [
"Export",
"this",
"name",
"as",
"a",
"token",
"."
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/names.py#L188-L215 | train | 22,937 |
pythongssapi/python-gssapi | gssapi/names.py | Name._inquire | def _inquire(self, **kwargs):
"""Inspect this name for information.
This method inspects the name for information.
If no keyword arguments are passed, all available information
is returned. Otherwise, only the keyword arguments that
are passed and set to `True` are returned.
Args:
mech_name (bool): get whether this is a mechanism name,
and, if so, the associated mechanism
attrs (bool): get the attributes names for this name
Returns:
InquireNameResult: the results of the inquiry, with unused
fields set to None
Raises:
GSSError
"""
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
if not kwargs:
default_val = True
else:
default_val = False
attrs = kwargs.get('attrs', default_val)
mech_name = kwargs.get('mech_name', default_val)
return rname_rfc6680.inquire_name(self, mech_name=mech_name,
attrs=attrs) | python | def _inquire(self, **kwargs):
"""Inspect this name for information.
This method inspects the name for information.
If no keyword arguments are passed, all available information
is returned. Otherwise, only the keyword arguments that
are passed and set to `True` are returned.
Args:
mech_name (bool): get whether this is a mechanism name,
and, if so, the associated mechanism
attrs (bool): get the attributes names for this name
Returns:
InquireNameResult: the results of the inquiry, with unused
fields set to None
Raises:
GSSError
"""
if rname_rfc6680 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"support RFC 6680 (the GSSAPI naming "
"extensions)")
if not kwargs:
default_val = True
else:
default_val = False
attrs = kwargs.get('attrs', default_val)
mech_name = kwargs.get('mech_name', default_val)
return rname_rfc6680.inquire_name(self, mech_name=mech_name,
attrs=attrs) | [
"def",
"_inquire",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"rname_rfc6680",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Your GSSAPI implementation does not \"",
"\"support RFC 6680 (the GSSAPI naming \"",
"\"extensions)\"",
")",
"if",
"not",... | Inspect this name for information.
This method inspects the name for information.
If no keyword arguments are passed, all available information
is returned. Otherwise, only the keyword arguments that
are passed and set to `True` are returned.
Args:
mech_name (bool): get whether this is a mechanism name,
and, if so, the associated mechanism
attrs (bool): get the attributes names for this name
Returns:
InquireNameResult: the results of the inquiry, with unused
fields set to None
Raises:
GSSError | [
"Inspect",
"this",
"name",
"for",
"information",
"."
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/names.py#L243-L279 | train | 22,938 |
pythongssapi/python-gssapi | gssapi/mechs.py | Mechanism.from_sasl_name | def from_sasl_name(cls, name=None):
"""
Create a Mechanism from its SASL name
Args:
name (str): SASL name of the desired mechanism
Returns:
Mechanism: the desired mechanism
Raises:
GSSError
:requires-ext:`rfc5801`
"""
if rfc5801 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for RFC 5801")
if isinstance(name, six.text_type):
name = name.encode(_utils._get_encoding())
m = rfc5801.inquire_mech_for_saslname(name)
return cls(m) | python | def from_sasl_name(cls, name=None):
"""
Create a Mechanism from its SASL name
Args:
name (str): SASL name of the desired mechanism
Returns:
Mechanism: the desired mechanism
Raises:
GSSError
:requires-ext:`rfc5801`
"""
if rfc5801 is None:
raise NotImplementedError("Your GSSAPI implementation does not "
"have support for RFC 5801")
if isinstance(name, six.text_type):
name = name.encode(_utils._get_encoding())
m = rfc5801.inquire_mech_for_saslname(name)
return cls(m) | [
"def",
"from_sasl_name",
"(",
"cls",
",",
"name",
"=",
"None",
")",
":",
"if",
"rfc5801",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Your GSSAPI implementation does not \"",
"\"have support for RFC 5801\"",
")",
"if",
"isinstance",
"(",
"name",
",",
... | Create a Mechanism from its SASL name
Args:
name (str): SASL name of the desired mechanism
Returns:
Mechanism: the desired mechanism
Raises:
GSSError
:requires-ext:`rfc5801` | [
"Create",
"a",
"Mechanism",
"from",
"its",
"SASL",
"name"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/mechs.py#L141-L164 | train | 22,939 |
pythongssapi/python-gssapi | gssapi/_utils.py | import_gssapi_extension | def import_gssapi_extension(name):
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None | python | def import_gssapi_extension(name):
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None | [
"def",
"import_gssapi_extension",
"(",
"name",
")",
":",
"try",
":",
"path",
"=",
"'gssapi.raw.ext_{0}'",
".",
"format",
"(",
"name",
")",
"__import__",
"(",
"path",
")",
"return",
"sys",
".",
"modules",
"[",
"path",
"]",
"except",
"ImportError",
":",
"ret... | Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None | [
"Import",
"a",
"GSSAPI",
"extension",
"module"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L10-L30 | train | 22,940 |
pythongssapi/python-gssapi | gssapi/_utils.py | inquire_property | def inquire_property(name, doc=None):
"""Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self):
if not self._started:
msg = ("Cannot read {0} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc) | python | def inquire_property(name, doc=None):
"""Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self):
if not self._started:
msg = ("Cannot read {0} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc) | [
"def",
"inquire_property",
"(",
"name",
",",
"doc",
"=",
"None",
")",
":",
"def",
"inquire_property",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_started",
":",
"msg",
"=",
"(",
"\"Cannot read {0} from a security context whose \"",
"\"establishment has not ... | Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property | [
"Creates",
"a",
"property",
"based",
"on",
"an",
"inquire",
"result"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L46-L68 | train | 22,941 |
pythongssapi/python-gssapi | gssapi/_utils.py | _encode_dict | def _encode_dict(d):
"""Encodes any relevant strings in a dict"""
def enc(x):
if isinstance(x, six.text_type):
return x.encode(_ENCODING)
else:
return x
return dict((enc(k), enc(v)) for k, v in six.iteritems(d)) | python | def _encode_dict(d):
"""Encodes any relevant strings in a dict"""
def enc(x):
if isinstance(x, six.text_type):
return x.encode(_ENCODING)
else:
return x
return dict((enc(k), enc(v)) for k, v in six.iteritems(d)) | [
"def",
"_encode_dict",
"(",
"d",
")",
":",
"def",
"enc",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"six",
".",
"text_type",
")",
":",
"return",
"x",
".",
"encode",
"(",
"_ENCODING",
")",
"else",
":",
"return",
"x",
"return",
"dict",
... | Encodes any relevant strings in a dict | [
"Encodes",
"any",
"relevant",
"strings",
"in",
"a",
"dict"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L101-L109 | train | 22,942 |
pythongssapi/python-gssapi | gssapi/_utils.py | catch_and_return_token | def catch_and_return_token(func, self, *args, **kwargs):
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can be later retrived through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
if e.token is not None and self.__DEFER_STEP_ERRORS__:
self._last_err = e
# skip the "return func" line above in the traceback
if six.PY2:
self._last_tb = sys.exc_info()[2].tb_next.tb_next
else:
self._last_err.__traceback__ = e.__traceback__.tb_next
return e.token
else:
raise | python | def catch_and_return_token(func, self, *args, **kwargs):
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can be later retrived through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
if e.token is not None and self.__DEFER_STEP_ERRORS__:
self._last_err = e
# skip the "return func" line above in the traceback
if six.PY2:
self._last_tb = sys.exc_info()[2].tb_next.tb_next
else:
self._last_err.__traceback__ = e.__traceback__.tb_next
return e.token
else:
raise | [
"def",
"catch_and_return_token",
"(",
"func",
",",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"GSSError",
"as",
"e",
":",
"if",
... | Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can be later retrived through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use). | [
"Optionally",
"defer",
"exceptions",
"and",
"return",
"a",
"token",
"instead"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L114-L139 | train | 22,943 |
pythongssapi/python-gssapi | gssapi/_utils.py | check_last_err | def check_last_err(func, self, *args, **kwargs):
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
if six.PY2:
six.reraise(type(self._last_err), self._last_err,
self._last_tb)
else:
# NB(directxman12): not using six.reraise in Python 3 leads
# to cleaner tracebacks, and raise x is valid
# syntax in Python 3 (unlike raise x, y, z)
raise self._last_err
finally:
if six.PY2:
del self._last_tb # in case of cycles, break glass
self._last_err = None
else:
return func(self, *args, **kwargs)
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs) | python | def check_last_err(func, self, *args, **kwargs):
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
if six.PY2:
six.reraise(type(self._last_err), self._last_err,
self._last_tb)
else:
# NB(directxman12): not using six.reraise in Python 3 leads
# to cleaner tracebacks, and raise x is valid
# syntax in Python 3 (unlike raise x, y, z)
raise self._last_err
finally:
if six.PY2:
del self._last_tb # in case of cycles, break glass
self._last_err = None
else:
return func(self, *args, **kwargs)
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs) | [
"def",
"check_last_err",
"(",
"func",
",",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_last_err",
"is",
"not",
"None",
":",
"try",
":",
"if",
"six",
".",
"PY2",
":",
"six",
".",
"reraise",
"(",
"type",
"(",
... | Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback. | [
"Check",
"and",
"raise",
"deferred",
"errors",
"before",
"running",
"the",
"function"
] | b6efe72aa35a4c1fe21b397e15fcb41611e365ce | https://github.com/pythongssapi/python-gssapi/blob/b6efe72aa35a4c1fe21b397e15fcb41611e365ce/gssapi/_utils.py#L143-L177 | train | 22,944 |
theislab/scvelo | scvelo/plotting/velocity_graph.py | velocity_graph | def velocity_graph(adata, basis=None, vkey='velocity', which_graph='velocity', n_neighbors=10,
alpha=.8, perc=90, edge_width=.2, edge_color='grey', color=None, use_raw=None, layer=None,
color_map=None, colorbar=True, palette=None, size=None, sort_order=True, groups=None,
components=None, projection='2d', legend_loc='on data', legend_fontsize=None, legend_fontweight=None,
right_margin=None, left_margin=None, xlabel=None, ylabel=None, title=None, fontsize=None,
figsize=None, dpi=None, frameon=None, show=True, save=None, ax=None):
"""\
Plot of the velocity graph.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` or `None` (default: `None`)
Key for annotations of observations/cells or variables/genes.
which_graph: `'velocity'` or `'neighbors'` (default: `'velocity'`)
Whether to show transitions from velocity graph or connectivities from neighbors graph.
{scatter}
Returns
-------
`matplotlib.Axis` if `show==False`
"""
basis = default_basis(adata) if basis is None else basis
title = which_graph + ' graph' if title is None else title
scatter_kwargs = {"basis": basis, "perc": perc, "use_raw": use_raw, "sort_order": sort_order, "alpha": alpha,
"components": components, "projection": projection, "legend_loc": legend_loc, "groups": groups,
"legend_fontsize": legend_fontsize, "legend_fontweight": legend_fontweight, "palette": palette,
"color_map": color_map, "frameon": frameon, "title": title, "xlabel": xlabel, "ylabel": ylabel,
"right_margin": right_margin, "left_margin": left_margin, "colorbar": colorbar, "dpi": dpi,
"fontsize": fontsize, "show": False, "save": None, "figsize": figsize, }
ax = scatter(adata, layer=layer, color=color, size=size, ax=ax, zorder=0, **scatter_kwargs)
from networkx import Graph, draw_networkx_edges
if which_graph == 'neighbors':
T = adata.uns['neighbors']['connectivities']
if perc is not None:
threshold = np.percentile(T.data, perc)
T.data[T.data < threshold] = 0
T.eliminate_zeros()
else:
T = transition_matrix(adata, vkey=vkey, weight_indirect_neighbors=0, n_neighbors=n_neighbors, perc=perc)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
edges = draw_networkx_edges(Graph(T), adata.obsm['X_' + basis], width=edge_width, edge_color=edge_color, ax=ax)
edges.set_zorder(-2)
edges.set_rasterized(settings._vector_friendly)
savefig_or_show('' if basis is None else basis, dpi=dpi, save=save, show=show)
if not show: return ax | python | def velocity_graph(adata, basis=None, vkey='velocity', which_graph='velocity', n_neighbors=10,
alpha=.8, perc=90, edge_width=.2, edge_color='grey', color=None, use_raw=None, layer=None,
color_map=None, colorbar=True, palette=None, size=None, sort_order=True, groups=None,
components=None, projection='2d', legend_loc='on data', legend_fontsize=None, legend_fontweight=None,
right_margin=None, left_margin=None, xlabel=None, ylabel=None, title=None, fontsize=None,
figsize=None, dpi=None, frameon=None, show=True, save=None, ax=None):
"""\
Plot of the velocity graph.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` or `None` (default: `None`)
Key for annotations of observations/cells or variables/genes.
which_graph: `'velocity'` or `'neighbors'` (default: `'velocity'`)
Whether to show transitions from velocity graph or connectivities from neighbors graph.
{scatter}
Returns
-------
`matplotlib.Axis` if `show==False`
"""
basis = default_basis(adata) if basis is None else basis
title = which_graph + ' graph' if title is None else title
scatter_kwargs = {"basis": basis, "perc": perc, "use_raw": use_raw, "sort_order": sort_order, "alpha": alpha,
"components": components, "projection": projection, "legend_loc": legend_loc, "groups": groups,
"legend_fontsize": legend_fontsize, "legend_fontweight": legend_fontweight, "palette": palette,
"color_map": color_map, "frameon": frameon, "title": title, "xlabel": xlabel, "ylabel": ylabel,
"right_margin": right_margin, "left_margin": left_margin, "colorbar": colorbar, "dpi": dpi,
"fontsize": fontsize, "show": False, "save": None, "figsize": figsize, }
ax = scatter(adata, layer=layer, color=color, size=size, ax=ax, zorder=0, **scatter_kwargs)
from networkx import Graph, draw_networkx_edges
if which_graph == 'neighbors':
T = adata.uns['neighbors']['connectivities']
if perc is not None:
threshold = np.percentile(T.data, perc)
T.data[T.data < threshold] = 0
T.eliminate_zeros()
else:
T = transition_matrix(adata, vkey=vkey, weight_indirect_neighbors=0, n_neighbors=n_neighbors, perc=perc)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
edges = draw_networkx_edges(Graph(T), adata.obsm['X_' + basis], width=edge_width, edge_color=edge_color, ax=ax)
edges.set_zorder(-2)
edges.set_rasterized(settings._vector_friendly)
savefig_or_show('' if basis is None else basis, dpi=dpi, save=save, show=show)
if not show: return ax | [
"def",
"velocity_graph",
"(",
"adata",
",",
"basis",
"=",
"None",
",",
"vkey",
"=",
"'velocity'",
",",
"which_graph",
"=",
"'velocity'",
",",
"n_neighbors",
"=",
"10",
",",
"alpha",
"=",
".8",
",",
"perc",
"=",
"90",
",",
"edge_width",
"=",
".2",
",",
... | \
Plot of the velocity graph.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` or `None` (default: `None`)
Key for annotations of observations/cells or variables/genes.
which_graph: `'velocity'` or `'neighbors'` (default: `'velocity'`)
Whether to show transitions from velocity graph or connectivities from neighbors graph.
{scatter}
Returns
-------
`matplotlib.Axis` if `show==False` | [
"\\",
"Plot",
"of",
"the",
"velocity",
"graph",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/plotting/velocity_graph.py#L13-L63 | train | 22,945 |
theislab/scvelo | scvelo/preprocessing/utils.py | cleanup | def cleanup(data, clean='layers', keep=None, copy=False):
"""Deletes attributes not needed.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
clean: `str` or list of `str` (default: `layers`)
Which attributes to consider for freeing memory.
keep: `str` or list of `str` (default: None)
Which attributes to keep.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with selection of attributes kept.
"""
adata = data.copy() if copy else data
keep = list([keep]) if isinstance(keep, str) else list() if keep is None else list(keep)
keep.extend(['spliced', 'unspliced', 'Ms', 'Mu', 'clusters', 'neighbors'])
ann_dict = {'obs': adata.obs_keys(), 'var': adata.var_keys(),
'uns': adata.uns_keys(), 'layers': list(adata.layers.keys())}
if 'all' not in clean:
ann_dict = {ann: values for (ann, values) in ann_dict.items() if ann in clean}
for (ann, values) in ann_dict.items():
for value in values:
if value not in keep: del(getattr(adata, ann)[value])
return adata if copy else None | python | def cleanup(data, clean='layers', keep=None, copy=False):
"""Deletes attributes not needed.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
clean: `str` or list of `str` (default: `layers`)
Which attributes to consider for freeing memory.
keep: `str` or list of `str` (default: None)
Which attributes to keep.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with selection of attributes kept.
"""
adata = data.copy() if copy else data
keep = list([keep]) if isinstance(keep, str) else list() if keep is None else list(keep)
keep.extend(['spliced', 'unspliced', 'Ms', 'Mu', 'clusters', 'neighbors'])
ann_dict = {'obs': adata.obs_keys(), 'var': adata.var_keys(),
'uns': adata.uns_keys(), 'layers': list(adata.layers.keys())}
if 'all' not in clean:
ann_dict = {ann: values for (ann, values) in ann_dict.items() if ann in clean}
for (ann, values) in ann_dict.items():
for value in values:
if value not in keep: del(getattr(adata, ann)[value])
return adata if copy else None | [
"def",
"cleanup",
"(",
"data",
",",
"clean",
"=",
"'layers'",
",",
"keep",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"adata",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"copy",
"else",
"data",
"keep",
"=",
"list",
"(",
"[",
"keep",
"]",
... | Deletes attributes not needed.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
clean: `str` or list of `str` (default: `layers`)
Which attributes to consider for freeing memory.
keep: `str` or list of `str` (default: None)
Which attributes to keep.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with selection of attributes kept. | [
"Deletes",
"attributes",
"not",
"needed",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/utils.py#L30-L63 | train | 22,946 |
theislab/scvelo | scvelo/preprocessing/utils.py | filter_genes | def filter_genes(data, min_counts=None, min_cells=None, max_counts=None, max_cells=None,
min_counts_u=None, min_cells_u=None, max_counts_u=None, max_cells_u=None,
min_shared_counts=None, min_shared_cells=None, copy=False):
"""Filter genes based on number of cells or counts.
Keep genes that have at least `min_counts` counts or are expressed in at
least `min_cells` cells or have at most `max_counts` counts or are expressed
in at most `max_cells` cells.
Only provide one of the optional parameters `min_counts`, `min_cells`,
`max_counts`, `max_cells` per call.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.spmatrix`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
min_counts : `int`, optional (default: `None`)
Minimum number of counts required for a gene to pass filtering.
min_cells : `int`, optional (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering.
max_counts : `int`, optional (default: `None`)
Maximum number of counts required for a gene to pass filtering.
max_cells : `int`, optional (default: `None`)
Maximum number of cells expressed required for a gene to pass filtering.
min_counts_u : `int`, optional (default: `None`)
Minimum number of unspliced counts required for a gene to pass filtering.
min_cells_u : `int`, optional (default: `None`)
Minimum number of unspliced cells expressed required for a gene to pass filtering.
max_counts_u : `int`, optional (default: `None`)
Maximum number of unspliced counts required for a gene to pass filtering.
max_cells_u : `int`, optional (default: `None`)
Maximum number of unspliced cells expressed required for a gene to pass filtering.
min_shared_counts: `int`, optional (default: `None`)
Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene.
min_shared_cells: `int`, optional (default: `None`)
Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced.
copy : `bool`, optional (default: `False`)
Determines whether a copy is returned.
Returns
-------
Filters the object and adds `n_counts` to `adata.var`.
"""
adata = data.copy() if copy else data
# set initial cell sizes before filtering
set_initial_size(adata)
layers = [layer for layer in ['spliced', 'unspliced'] if layer in adata.layers.keys()]
if min_shared_counts is not None or min_shared_cells is not None: layers.extend(['shared'])
for layer in layers:
if layer is 'spliced':
_min_counts, _min_cells, _max_counts, _max_cells = min_counts, min_cells, max_counts, max_cells
elif layer is 'unspliced':
_min_counts, _min_cells, _max_counts, _max_cells = min_counts_u, min_cells_u, max_counts_u, max_cells_u
else: # shared counts/cells
_min_counts, _min_cells, _max_counts, _max_cells = min_shared_counts, min_shared_cells, None, None
if layer in adata.layers.keys():
X = adata.layers[layer]
else: # shared counts/cells
Xs, Xu = adata.layers['spliced'], adata.layers['unspliced']
nonzeros = (Xs > 0).multiply(Xu > 0) if issparse(Xs) else (Xs > 0) * (Xu > 0)
X = nonzeros.multiply(Xs) + nonzeros.multiply(Xu) if issparse(nonzeros) else nonzeros * (Xs + Xu)
gene_subset = np.ones(adata.n_vars, dtype=bool)
if _min_counts is not None or _max_counts is not None:
gene_subset, _ = filter(X, min_counts=_min_counts, max_counts=_max_counts)
adata._inplace_subset_var(gene_subset)
if _min_cells is not None or _max_cells is not None:
gene_subset, _ = filter(X, min_cells=_min_cells, max_cells=_max_cells)
adata._inplace_subset_var(gene_subset)
s = np.sum(~gene_subset)
if s > 0:
logg.info('Filtered out {} genes that are detected'.format(s), end=' ')
if _min_cells is not None or _min_counts is not None:
logg.info('in less than', str(_min_cells) + ' cells (' + str(layer) + ').' if _min_counts is None
else str(_min_counts) + ' counts (' + str(layer) + ').', no_indent=True)
if max_cells is not None or max_counts is not None:
logg.info('in more than ', str(_max_cells) + ' cells(' + str(layer) + ').' if _max_counts is None
else str(_max_counts) + ' counts (' + str(layer) + ').', no_indent=True)
return adata if copy else None | python | def filter_genes(data, min_counts=None, min_cells=None, max_counts=None, max_cells=None,
min_counts_u=None, min_cells_u=None, max_counts_u=None, max_cells_u=None,
min_shared_counts=None, min_shared_cells=None, copy=False):
"""Filter genes based on number of cells or counts.
Keep genes that have at least `min_counts` counts or are expressed in at
least `min_cells` cells or have at most `max_counts` counts or are expressed
in at most `max_cells` cells.
Only provide one of the optional parameters `min_counts`, `min_cells`,
`max_counts`, `max_cells` per call.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.spmatrix`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
min_counts : `int`, optional (default: `None`)
Minimum number of counts required for a gene to pass filtering.
min_cells : `int`, optional (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering.
max_counts : `int`, optional (default: `None`)
Maximum number of counts required for a gene to pass filtering.
max_cells : `int`, optional (default: `None`)
Maximum number of cells expressed required for a gene to pass filtering.
min_counts_u : `int`, optional (default: `None`)
Minimum number of unspliced counts required for a gene to pass filtering.
min_cells_u : `int`, optional (default: `None`)
Minimum number of unspliced cells expressed required for a gene to pass filtering.
max_counts_u : `int`, optional (default: `None`)
Maximum number of unspliced counts required for a gene to pass filtering.
max_cells_u : `int`, optional (default: `None`)
Maximum number of unspliced cells expressed required for a gene to pass filtering.
min_shared_counts: `int`, optional (default: `None`)
Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene.
min_shared_cells: `int`, optional (default: `None`)
Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced.
copy : `bool`, optional (default: `False`)
Determines whether a copy is returned.
Returns
-------
Filters the object and adds `n_counts` to `adata.var`.
"""
adata = data.copy() if copy else data
# set initial cell sizes before filtering
set_initial_size(adata)
layers = [layer for layer in ['spliced', 'unspliced'] if layer in adata.layers.keys()]
if min_shared_counts is not None or min_shared_cells is not None: layers.extend(['shared'])
for layer in layers:
if layer is 'spliced':
_min_counts, _min_cells, _max_counts, _max_cells = min_counts, min_cells, max_counts, max_cells
elif layer is 'unspliced':
_min_counts, _min_cells, _max_counts, _max_cells = min_counts_u, min_cells_u, max_counts_u, max_cells_u
else: # shared counts/cells
_min_counts, _min_cells, _max_counts, _max_cells = min_shared_counts, min_shared_cells, None, None
if layer in adata.layers.keys():
X = adata.layers[layer]
else: # shared counts/cells
Xs, Xu = adata.layers['spliced'], adata.layers['unspliced']
nonzeros = (Xs > 0).multiply(Xu > 0) if issparse(Xs) else (Xs > 0) * (Xu > 0)
X = nonzeros.multiply(Xs) + nonzeros.multiply(Xu) if issparse(nonzeros) else nonzeros * (Xs + Xu)
gene_subset = np.ones(adata.n_vars, dtype=bool)
if _min_counts is not None or _max_counts is not None:
gene_subset, _ = filter(X, min_counts=_min_counts, max_counts=_max_counts)
adata._inplace_subset_var(gene_subset)
if _min_cells is not None or _max_cells is not None:
gene_subset, _ = filter(X, min_cells=_min_cells, max_cells=_max_cells)
adata._inplace_subset_var(gene_subset)
s = np.sum(~gene_subset)
if s > 0:
logg.info('Filtered out {} genes that are detected'.format(s), end=' ')
if _min_cells is not None or _min_counts is not None:
logg.info('in less than', str(_min_cells) + ' cells (' + str(layer) + ').' if _min_counts is None
else str(_min_counts) + ' counts (' + str(layer) + ').', no_indent=True)
if max_cells is not None or max_counts is not None:
logg.info('in more than ', str(_max_cells) + ' cells(' + str(layer) + ').' if _max_counts is None
else str(_max_counts) + ' counts (' + str(layer) + ').', no_indent=True)
return adata if copy else None | [
"def",
"filter_genes",
"(",
"data",
",",
"min_counts",
"=",
"None",
",",
"min_cells",
"=",
"None",
",",
"max_counts",
"=",
"None",
",",
"max_cells",
"=",
"None",
",",
"min_counts_u",
"=",
"None",
",",
"min_cells_u",
"=",
"None",
",",
"max_counts_u",
"=",
... | Filter genes based on number of cells or counts.
Keep genes that have at least `min_counts` counts or are expressed in at
least `min_cells` cells or have at most `max_counts` counts or are expressed
in at most `max_cells` cells.
Only provide one of the optional parameters `min_counts`, `min_cells`,
`max_counts`, `max_cells` per call.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.spmatrix`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
min_counts : `int`, optional (default: `None`)
Minimum number of counts required for a gene to pass filtering.
min_cells : `int`, optional (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering.
max_counts : `int`, optional (default: `None`)
Maximum number of counts required for a gene to pass filtering.
max_cells : `int`, optional (default: `None`)
Maximum number of cells expressed required for a gene to pass filtering.
min_counts_u : `int`, optional (default: `None`)
Minimum number of unspliced counts required for a gene to pass filtering.
min_cells_u : `int`, optional (default: `None`)
Minimum number of unspliced cells expressed required for a gene to pass filtering.
max_counts_u : `int`, optional (default: `None`)
Maximum number of unspliced counts required for a gene to pass filtering.
max_cells_u : `int`, optional (default: `None`)
Maximum number of unspliced cells expressed required for a gene to pass filtering.
min_shared_counts: `int`, optional (default: `None`)
Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene.
min_shared_cells: `int`, optional (default: `None`)
Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced.
copy : `bool`, optional (default: `False`)
Determines whether a copy is returned.
Returns
-------
Filters the object and adds `n_counts` to `adata.var`. | [
"Filter",
"genes",
"based",
"on",
"number",
"of",
"cells",
"or",
"counts",
".",
"Keep",
"genes",
"that",
"have",
"at",
"least",
"min_counts",
"counts",
"or",
"are",
"expressed",
"in",
"at",
"least",
"min_cells",
"cells",
"or",
"have",
"at",
"most",
"max_co... | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/utils.py#L99-L185 | train | 22,947 |
theislab/scvelo | scvelo/preprocessing/utils.py | filter_genes_dispersion | def filter_genes_dispersion(data, flavor='seurat', min_disp=None, max_disp=None, min_mean=None, max_mean=None,
n_bins=20, n_top_genes=None, log=True, copy=False):
"""Extract highly variable genes.
The normalized dispersion is obtained by scaling with the mean and standard
deviation of the dispersions for genes falling into a given bin for mean
expression of genes. This means that for each bin of mean expression, highly
variable genes are selected.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
flavor : {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. If choosing
'seurat', this expects non-logarithmized data - the logarithm of mean
and dispersion is taken internally when `log` is at its default value
`True`. For 'cell_ranger', this is usually called for logarithmized data
- in this case you should set `log` to `False`. In their default
workflows, Seurat passes the cutoffs whereas Cell Ranger passes
`n_top_genes`.
min_mean=0.0125, max_mean=3, min_disp=0.5, max_disp=`None` : `float`, optional
If `n_top_genes` unequals `None`, these cutoffs for the means and the
normalized dispersions are ignored.
n_bins : `int` (default: 20)
Number of bins for binning the mean gene expression. Normalization is
done with respect to each bin. If just a single gene falls into a bin,
the normalized dispersion is artificially set to 1. You'll be informed
about this if you set `settings.verbosity = 4`.
n_top_genes : `int` or `None` (default: `None`)
Number of highly-variable genes to keep.
log : `bool`, optional (default: `True`)
Use the logarithm of the mean to variance ratio.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
If an AnnData `adata` is passed, returns or updates `adata` depending on \
`copy`. It filters the `adata` and adds the annotations
"""
adata = data.copy() if copy else data
set_initial_size(adata)
if n_top_genes is not None and adata.n_vars < n_top_genes:
logg.info('Skip filtering by dispersion since number of variables are less than `n_top_genes`')
else:
if flavor is 'svr':
mu = adata.X.mean(0).A1 if issparse(adata.X) else adata.X.mean(0)
sigma = np.sqrt(adata.X.multiply(adata.X).mean(0).A1 - mu ** 2) if issparse(adata.X) else adata.X.std(0)
log_mu = np.log2(mu)
log_cv = np.log2(sigma / mu)
from sklearn.svm import SVR
clf = SVR(gamma=150. / len(mu))
clf.fit(log_mu[:, None], log_cv)
score = log_cv - clf.predict(log_mu[:, None])
nth_score = np.sort(score)[::-1][n_top_genes]
adata._inplace_subset_var(score >= nth_score)
else:
from scanpy.api.pp import filter_genes_dispersion
filter_genes_dispersion(adata, flavor=flavor, min_disp=min_disp, max_disp=max_disp, min_mean=min_mean,
max_mean=max_mean, n_bins=n_bins, n_top_genes=n_top_genes, log=log)
return adata if copy else None | python | def filter_genes_dispersion(data, flavor='seurat', min_disp=None, max_disp=None, min_mean=None, max_mean=None,
n_bins=20, n_top_genes=None, log=True, copy=False):
"""Extract highly variable genes.
The normalized dispersion is obtained by scaling with the mean and standard
deviation of the dispersions for genes falling into a given bin for mean
expression of genes. This means that for each bin of mean expression, highly
variable genes are selected.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
flavor : {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. If choosing
'seurat', this expects non-logarithmized data - the logarithm of mean
and dispersion is taken internally when `log` is at its default value
`True`. For 'cell_ranger', this is usually called for logarithmized data
- in this case you should set `log` to `False`. In their default
workflows, Seurat passes the cutoffs whereas Cell Ranger passes
`n_top_genes`.
min_mean=0.0125, max_mean=3, min_disp=0.5, max_disp=`None` : `float`, optional
If `n_top_genes` unequals `None`, these cutoffs for the means and the
normalized dispersions are ignored.
n_bins : `int` (default: 20)
Number of bins for binning the mean gene expression. Normalization is
done with respect to each bin. If just a single gene falls into a bin,
the normalized dispersion is artificially set to 1. You'll be informed
about this if you set `settings.verbosity = 4`.
n_top_genes : `int` or `None` (default: `None`)
Number of highly-variable genes to keep.
log : `bool`, optional (default: `True`)
Use the logarithm of the mean to variance ratio.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
If an AnnData `adata` is passed, returns or updates `adata` depending on \
`copy`. It filters the `adata` and adds the annotations
"""
adata = data.copy() if copy else data
set_initial_size(adata)
if n_top_genes is not None and adata.n_vars < n_top_genes:
logg.info('Skip filtering by dispersion since number of variables are less than `n_top_genes`')
else:
if flavor is 'svr':
mu = adata.X.mean(0).A1 if issparse(adata.X) else adata.X.mean(0)
sigma = np.sqrt(adata.X.multiply(adata.X).mean(0).A1 - mu ** 2) if issparse(adata.X) else adata.X.std(0)
log_mu = np.log2(mu)
log_cv = np.log2(sigma / mu)
from sklearn.svm import SVR
clf = SVR(gamma=150. / len(mu))
clf.fit(log_mu[:, None], log_cv)
score = log_cv - clf.predict(log_mu[:, None])
nth_score = np.sort(score)[::-1][n_top_genes]
adata._inplace_subset_var(score >= nth_score)
else:
from scanpy.api.pp import filter_genes_dispersion
filter_genes_dispersion(adata, flavor=flavor, min_disp=min_disp, max_disp=max_disp, min_mean=min_mean,
max_mean=max_mean, n_bins=n_bins, n_top_genes=n_top_genes, log=log)
return adata if copy else None | [
"def",
"filter_genes_dispersion",
"(",
"data",
",",
"flavor",
"=",
"'seurat'",
",",
"min_disp",
"=",
"None",
",",
"max_disp",
"=",
"None",
",",
"min_mean",
"=",
"None",
",",
"max_mean",
"=",
"None",
",",
"n_bins",
"=",
"20",
",",
"n_top_genes",
"=",
"Non... | Extract highly variable genes.
The normalized dispersion is obtained by scaling with the mean and standard
deviation of the dispersions for genes falling into a given bin for mean
expression of genes. This means that for each bin of mean expression, highly
variable genes are selected.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
flavor : {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. If choosing
'seurat', this expects non-logarithmized data - the logarithm of mean
and dispersion is taken internally when `log` is at its default value
`True`. For 'cell_ranger', this is usually called for logarithmized data
- in this case you should set `log` to `False`. In their default
workflows, Seurat passes the cutoffs whereas Cell Ranger passes
`n_top_genes`.
min_mean=0.0125, max_mean=3, min_disp=0.5, max_disp=`None` : `float`, optional
If `n_top_genes` unequals `None`, these cutoffs for the means and the
normalized dispersions are ignored.
n_bins : `int` (default: 20)
Number of bins for binning the mean gene expression. Normalization is
done with respect to each bin. If just a single gene falls into a bin,
the normalized dispersion is artificially set to 1. You'll be informed
about this if you set `settings.verbosity = 4`.
n_top_genes : `int` or `None` (default: `None`)
Number of highly-variable genes to keep.
log : `bool`, optional (default: `True`)
Use the logarithm of the mean to variance ratio.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
If an AnnData `adata` is passed, returns or updates `adata` depending on \
`copy`. It filters the `adata` and adds the annotations | [
"Extract",
"highly",
"variable",
"genes",
".",
"The",
"normalized",
"dispersion",
"is",
"obtained",
"by",
"scaling",
"with",
"the",
"mean",
"and",
"standard",
"deviation",
"of",
"the",
"dispersions",
"for",
"genes",
"falling",
"into",
"a",
"given",
"bin",
"for... | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/utils.py#L188-L251 | train | 22,948 |
theislab/scvelo | scvelo/preprocessing/utils.py | normalize_per_cell | def normalize_per_cell(data, counts_per_cell_after=None, counts_per_cell=None, key_n_counts=None,
max_proportion_per_cell=None, use_initial_size=True, layers=['spliced', 'unspliced'],
enforce=False, copy=False):
"""Normalize each cell by total counts over all genes.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
counts_per_cell_after : `float` or `None`, optional (default: `None`)
If `None`, after normalization, each cell has a total count equal
to the median of the *counts_per_cell* before normalization.
counts_per_cell : `np.array`, optional (default: `None`)
Precomputed counts per cell.
key_n_counts : `str`, optional (default: `'n_counts'`)
Name of the field in `adata.obs` where the total counts per cell are
stored.
max_proportion_per_cell : `int` (default: `None`)
Exclude genes counts that account for more than a specific proportion of cell size, e.g. 0.05.
use_initial_size : `bool` (default: `True`)
Whether to use initial cell sizes oder actual cell sizes.
layers : `str` or `list` (default: `{'spliced', 'unspliced'}`)
Keys for layers to be also considered for normalization.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
Returns or updates `adata` with normalized version of the original `adata.X`, depending on `copy`.
"""
adata = data.copy() if copy else data
layers = adata.layers.keys() if layers is 'all' else [layers] if isinstance(layers, str) \
else [layer for layer in layers if layer in adata.layers.keys()]
layers = ['X'] + layers
modified_layers = []
for layer in layers:
X = adata.X if layer is 'X' else adata.layers[layer]
if not_yet_normalized(X) or enforce:
counts = counts_per_cell if counts_per_cell is not None \
else get_initial_size(adata, layer) if use_initial_size else get_size(adata, layer)
if max_proportion_per_cell is not None and (0 < max_proportion_per_cell < 1):
counts = counts_per_cell_quantile(X, max_proportion_per_cell, counts)
# equivalent to scanpy.pp.normalize_per_cell(X, counts_per_cell_after, counts)
counts_after = np.median(counts) if counts_per_cell_after is None else counts_per_cell_after
counts /= counts_after + (counts_after == 0)
counts += counts == 0 # to avoid division by zero
if issparse(X):
sparsefuncs.inplace_row_scale(X, 1 / counts)
else:
X /= np.array(counts[:, None])
modified_layers.append(layer)
adata.obs['n_counts' if key_n_counts is None else key_n_counts] = get_size(adata)
if len(modified_layers) > 0:
logg.info('Normalized count data:', ', '.join(modified_layers) + '.')
return adata if copy else None | python | def normalize_per_cell(data, counts_per_cell_after=None, counts_per_cell=None, key_n_counts=None,
max_proportion_per_cell=None, use_initial_size=True, layers=['spliced', 'unspliced'],
enforce=False, copy=False):
"""Normalize each cell by total counts over all genes.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
counts_per_cell_after : `float` or `None`, optional (default: `None`)
If `None`, after normalization, each cell has a total count equal
to the median of the *counts_per_cell* before normalization.
counts_per_cell : `np.array`, optional (default: `None`)
Precomputed counts per cell.
key_n_counts : `str`, optional (default: `'n_counts'`)
Name of the field in `adata.obs` where the total counts per cell are
stored.
max_proportion_per_cell : `int` (default: `None`)
Exclude genes counts that account for more than a specific proportion of cell size, e.g. 0.05.
use_initial_size : `bool` (default: `True`)
Whether to use initial cell sizes oder actual cell sizes.
layers : `str` or `list` (default: `{'spliced', 'unspliced'}`)
Keys for layers to be also considered for normalization.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
Returns or updates `adata` with normalized version of the original `adata.X`, depending on `copy`.
"""
adata = data.copy() if copy else data
layers = adata.layers.keys() if layers is 'all' else [layers] if isinstance(layers, str) \
else [layer for layer in layers if layer in adata.layers.keys()]
layers = ['X'] + layers
modified_layers = []
for layer in layers:
X = adata.X if layer is 'X' else adata.layers[layer]
if not_yet_normalized(X) or enforce:
counts = counts_per_cell if counts_per_cell is not None \
else get_initial_size(adata, layer) if use_initial_size else get_size(adata, layer)
if max_proportion_per_cell is not None and (0 < max_proportion_per_cell < 1):
counts = counts_per_cell_quantile(X, max_proportion_per_cell, counts)
# equivalent to scanpy.pp.normalize_per_cell(X, counts_per_cell_after, counts)
counts_after = np.median(counts) if counts_per_cell_after is None else counts_per_cell_after
counts /= counts_after + (counts_after == 0)
counts += counts == 0 # to avoid division by zero
if issparse(X):
sparsefuncs.inplace_row_scale(X, 1 / counts)
else:
X /= np.array(counts[:, None])
modified_layers.append(layer)
adata.obs['n_counts' if key_n_counts is None else key_n_counts] = get_size(adata)
if len(modified_layers) > 0:
logg.info('Normalized count data:', ', '.join(modified_layers) + '.')
return adata if copy else None | [
"def",
"normalize_per_cell",
"(",
"data",
",",
"counts_per_cell_after",
"=",
"None",
",",
"counts_per_cell",
"=",
"None",
",",
"key_n_counts",
"=",
"None",
",",
"max_proportion_per_cell",
"=",
"None",
",",
"use_initial_size",
"=",
"True",
",",
"layers",
"=",
"["... | Normalize each cell by total counts over all genes.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
counts_per_cell_after : `float` or `None`, optional (default: `None`)
If `None`, after normalization, each cell has a total count equal
to the median of the *counts_per_cell* before normalization.
counts_per_cell : `np.array`, optional (default: `None`)
Precomputed counts per cell.
key_n_counts : `str`, optional (default: `'n_counts'`)
Name of the field in `adata.obs` where the total counts per cell are
stored.
max_proportion_per_cell : `int` (default: `None`)
Exclude genes counts that account for more than a specific proportion of cell size, e.g. 0.05.
use_initial_size : `bool` (default: `True`)
Whether to use initial cell sizes oder actual cell sizes.
layers : `str` or `list` (default: `{'spliced', 'unspliced'}`)
Keys for layers to be also considered for normalization.
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
Returns or updates `adata` with normalized version of the original `adata.X`, depending on `copy`. | [
"Normalize",
"each",
"cell",
"by",
"total",
"counts",
"over",
"all",
"genes",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/utils.py#L266-L325 | train | 22,949 |
def filter_and_normalize(data, min_counts=None, min_counts_u=None, min_cells=None, min_cells_u=None,
                         min_shared_counts=None, min_shared_cells=None, n_top_genes=None, flavor='seurat', log=True,
                         copy=False):
    """Filtering, normalization and log transform.

    Expects non-logarithmized data. If using logarithmized data, pass `log=False`.

    Equivalent to running, in order: `scv.pp.filter_genes`, `scv.pp.normalize_per_cell`,
    optionally `scv.pp.filter_genes_dispersion` (if `n_top_genes` is set) and
    optionally `scv.pp.log1p` (if `log` is set and X is not yet normalized/logarithmized).

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    min_counts: `int` (default: `None`)
        Minimum number of counts required for a gene to pass filtering (spliced).
    min_counts_u: `int` (default: `None`)
        Minimum number of counts required for a gene to pass filtering (unspliced).
    min_cells: `int` (default: `None`)
        Minimum number of cells expressed required for a gene to pass filtering (spliced).
    min_cells_u: `int` (default: `None`)
        Minimum number of cells expressed required for a gene to pass filtering (unspliced).
    min_shared_counts: `int`, optional (default: `None`)
        Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene.
    min_shared_cells: `int`, optional (default: `None`)
        Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced.
    n_top_genes: `int` (default: `None`)
        Number of genes to keep.
    flavor: {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
        Choose the flavor for computing normalized dispersion. If choosing 'seurat', this expects non-logarithmized data.
    log: `bool` (default: `True`)
        Take logarithm.
    copy: `bool` (default: `False`)
        Return a copy of `adata` instead of updating it.

    Returns
    -------
    Returns or updates `adata` depending on `copy`.
    """
    adata = data.copy() if copy else data

    if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys():
        raise ValueError('Could not find spliced / unspliced counts.')

    filter_genes(adata, min_counts=min_counts, min_counts_u=min_counts_u, min_cells=min_cells, min_cells_u=min_cells_u,
                 min_shared_counts=min_shared_counts, min_shared_cells=min_shared_cells,)
    normalize_per_cell(adata)
    if n_top_genes is not None:
        filter_genes_dispersion(adata, n_top_genes=n_top_genes, flavor=flavor)

    # X looks un-logarithmized iff it still matches the raw spliced counts
    # (checked on a small prefix of cells for speed).
    log_advised = np.allclose(adata.X[:10].sum(), adata.layers['spliced'][:10].sum())

    if log and log_advised:
        log1p(adata)
        msg = 'Logarithmized X.'
    elif log:
        msg = 'Did not modify X as it looks preprocessed already.'
    elif log_advised:
        msg = 'Consider logarithmizing X with `scv.pp.log1p` for better results.'
    else:
        msg = ''
    logg.info(msg)

    return adata if copy else None
"def",
"filter_and_normalize",
"(",
"data",
",",
"min_counts",
"=",
"None",
",",
"min_counts_u",
"=",
"None",
",",
"min_cells",
"=",
"None",
",",
"min_cells_u",
"=",
"None",
",",
"min_shared_counts",
"=",
"None",
",",
"min_shared_cells",
"=",
"None",
",",
"n... | Filtering, normalization and log transform
Expects non-logarithmized data. If using logarithmized data, pass `log=False`.
Runs the following steps
.. code:: python
scv.pp.filter_genes(adata)
scv.pp.normalize_per_cell(adata)
if n_top_genes is not None:
scv.pp.filter_genes_dispersion(adata)
if log:
scv.pp.log1p(adata)
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
min_counts: `int` (default: `None`)
Minimum number of counts required for a gene to pass filtering (spliced).
min_counts_u: `int` (default: `None`)
Minimum number of counts required for a gene to pass filtering (unspliced).
min_cells: `int` (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering (spliced).
min_cells_u: `int` (default: `None`)
Minimum number of cells expressed required for a gene to pass filtering (unspliced).
min_shared_counts: `int`, optional (default: `None`)
Minimum number of counts (in cells expressed simultaneously in unspliced and spliced) required for a gene.
min_shared_cells: `int`, optional (default: `None`)
Minimum number of cells required for a gene to be expressed simultaneously in unspliced and spliced.
n_top_genes: `int` (default: `None`)
Number of genes to keep.
flavor: {'seurat', 'cell_ranger', 'svr'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. If choosing 'seurat', this expects non-logarithmized data.
log: `bool` (default: `True`)
Take logarithm.
copy: `bool` (default: `False`)
Return a copy of `adata` instead of updating it.
Returns
-------
Returns or updates `adata` depending on `copy`. | [
"Filtering",
"normalization",
"and",
"log",
"transform"
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/utils.py#L347-L414 | train | 22,950 |
def toy_data(n_obs):
    """Randomly samples from the Dentate Gyrus dataset.

    Cells are drawn with replacement via `np.random.choice`, so the sample may
    contain duplicates; observation names are reset to unique string indices.

    Arguments
    ---------
    n_obs: `int`
        Size of the sampled dataset

    Returns
    -------
    Returns `adata` object
    """
    # NOTE: the original body carried a second, stray docstring literal here
    # ("Random samples from Dentate Gyrus.") — a dead expression statement,
    # removed without behavior change.
    adata = dentategyrus()
    indices = np.random.choice(adata.n_obs, n_obs)  # sampling with replacement
    adata = adata[indices]
    # fresh, unique string obs names for the (possibly duplicated) cells
    adata.obs_names = np.array(range(adata.n_obs), dtype='str')
    adata.var_names_make_unique()
    return adata
"def",
"toy_data",
"(",
"n_obs",
")",
":",
"\"\"\"Random samples from Dentate Gyrus.\n \"\"\"",
"adata",
"=",
"dentategyrus",
"(",
")",
"indices",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"adata",
".",
"n_obs",
",",
"n_obs",
")",
"adata",
"=",
"adata",... | Randomly samples from the Dentate Gyrus dataset.
Arguments
---------
n_obs: `int`
Size of the sampled dataset
Returns
-------
Returns `adata` object | [
"Randomly",
"samples",
"from",
"the",
"Dentate",
"Gyrus",
"dataset",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/datasets.py#L10-L31 | train | 22,951 |
def forebrain():
    """Developing human forebrain.

    Forebrain tissue of a week 10 embryo, focusing on the glutamatergic neuronal lineage.

    Returns
    -------
    Returns `adata` object
    """
    loom_file = 'data/ForebrainGlut/hgForebrainGlut.loom'
    backup = 'http://pklab.med.harvard.edu/velocyto/hgForebrainGlut/hgForebrainGlut.loom'
    # download (if not cached), read sparse, and clean up the raw loom file
    adata = read(loom_file, backup_url=backup, cleanup=True, sparse=True, cache=True)
    adata.var_names_make_unique()
    return adata
"def",
"forebrain",
"(",
")",
":",
"filename",
"=",
"'data/ForebrainGlut/hgForebrainGlut.loom'",
"url",
"=",
"'http://pklab.med.harvard.edu/velocyto/hgForebrainGlut/hgForebrainGlut.loom'",
"adata",
"=",
"read",
"(",
"filename",
",",
"backup_url",
"=",
"url",
",",
"cleanup",... | Developing human forebrain.
Forebrain tissue of a week 10 embryo, focusing on the glutamatergic neuronal lineage.
Returns
-------
Returns `adata` object | [
"Developing",
"human",
"forebrain",
".",
"Forebrain",
"tissue",
"of",
"a",
"week",
"10",
"embryo",
"focusing",
"on",
"the",
"glutamatergic",
"neuronal",
"lineage",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/datasets.py#L68-L80 | train | 22,952 |
def set_rcParams_scvelo(fontsize=8, color_map=None, frameon=None):
    """Set matplotlib.rcParams to scvelo defaults.

    Arguments
    ---------
    fontsize: `int` (default: 8)
        Base font size; axis/tick/legend label sizes are scaled to 92% of it.
    color_map: `str` or `None` (default: `None`)
        Default image color map; falls back to 'RdBu_r' when `None`.
    frameon: `bool` or `None` (default: `None`)
        Whether plots draw a frame; falls back to `False` when `None`.
        Stored in the module-level `_frameon`.
    """
    # dpi options (mpl default: 100, 100)
    rcParams['figure.dpi'] = 100
    rcParams['savefig.dpi'] = 150

    # figure (mpl default: 0.125, 0.96, 0.15, 0.91)
    rcParams['figure.figsize'] = (7, 5)
    rcParams['figure.subplot.left'] = 0.18
    rcParams['figure.subplot.right'] = 0.96
    rcParams['figure.subplot.bottom'] = 0.15
    rcParams['figure.subplot.top'] = 0.91

    # lines (defaults: 1.5, 6, 1)
    rcParams['lines.linewidth'] = 1.5  # the line width of the frame
    rcParams['lines.markersize'] = 6
    rcParams['lines.markeredgewidth'] = 1

    # font
    rcParams['font.sans-serif'] = \
        ['Arial', 'Helvetica', 'DejaVu Sans',
         'Bitstream Vera Sans', 'sans-serif']

    # NOTE: a redundant `fontsize = fontsize` self-assignment was removed here.
    labelsize = 0.92 * fontsize

    # font sizes (mpl default: 10, medium, large, medium)
    rcParams['font.size'] = fontsize
    rcParams['legend.fontsize'] = labelsize
    rcParams['axes.titlesize'] = fontsize
    rcParams['axes.labelsize'] = labelsize

    # legend (mpl default: 1, 1, 2, 0.8)
    rcParams['legend.numpoints'] = 1
    rcParams['legend.scatterpoints'] = 1
    rcParams['legend.handlelength'] = 0.5
    rcParams['legend.handletextpad'] = 0.4

    # color cycle
    rcParams['axes.prop_cycle'] = cycler(color=vega_10)

    # axes
    rcParams['axes.linewidth'] = 0.8
    rcParams['axes.edgecolor'] = 'black'
    rcParams['axes.facecolor'] = 'white'

    # ticks (mpl default: k, k, medium, medium)
    rcParams['xtick.color'] = 'k'
    rcParams['ytick.color'] = 'k'
    rcParams['xtick.labelsize'] = labelsize
    rcParams['ytick.labelsize'] = labelsize

    # axes grid (mpl default: False, #b0b0b0)
    rcParams['axes.grid'] = False
    rcParams['grid.color'] = '.8'

    # color map
    rcParams['image.cmap'] = 'RdBu_r' if color_map is None else color_map

    # frame (mpl default: True)
    frameon = False if frameon is None else frameon
    global _frameon
    _frameon = frameon
"def",
"set_rcParams_scvelo",
"(",
"fontsize",
"=",
"8",
",",
"color_map",
"=",
"None",
",",
"frameon",
"=",
"None",
")",
":",
"# dpi options (mpl default: 100, 100)",
"rcParams",
"[",
"'figure.dpi'",
"]",
"=",
"100",
"rcParams",
"[",
"'savefig.dpi'",
"]",
"=",
... | Set matplotlib.rcParams to scvelo defaults. | [
"Set",
"matplotlib",
".",
"rcParams",
"to",
"scvelo",
"defaults",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/settings.py#L84-L147 | train | 22,953 |
def merge(adata, ldata, copy=True):
    """Merges two annotated data matrices.

    Arguments
    ---------
    adata: :class:`~anndata.AnnData`
        Annotated data matrix (reference data set).
    ldata: :class:`~anndata.AnnData`
        Annotated data matrix (to be merged into adata).

    Returns
    -------
    Returns a :class:`~anndata.AnnData` object
    """
    def _vars_identical(a, b):
        # True iff both objects carry exactly the same variable names, in order.
        return len(a.var_names) == len(b.var_names) and np.all(a.var_names == b.var_names)

    # make sure initial (pre-normalization) sizes are recorded before merging
    if 'spliced' in ldata.layers.keys() and 'initial_size_spliced' not in ldata.obs.keys():
        set_initial_size(ldata)
    elif 'spliced' in adata.layers.keys() and 'initial_size_spliced' not in adata.obs.keys():
        set_initial_size(adata)

    common_obs = adata.obs_names.intersection(ldata.obs_names)
    common_vars = adata.var_names.intersection(ldata.var_names)

    if len(common_obs) == 0:
        # no shared cell names yet; try harmonizing barcodes first
        clean_obs_names(adata)
        clean_obs_names(ldata)
        common_obs = adata.obs_names.intersection(ldata.obs_names)

    if copy:
        # keep the object with more variables as the merge target
        wider_first = adata.shape[1] >= ldata.shape[1]
        first, second = (adata, ldata) if wider_first else (ldata, adata)
        _adata = first[common_obs].copy()
        _ldata = second[common_obs].copy()
    else:
        adata._inplace_subset_obs(common_obs)
        _adata, _ldata = adata, ldata[common_obs]

    if len(common_vars) > 0 and not _vars_identical(_adata, _ldata):
        _adata._inplace_subset_var(common_vars)
        _ldata._inplace_subset_var(common_vars)

    # carry over all per-cell annotations and layers from the source object
    for attr in _ldata.obs.keys():
        _adata.obs[attr] = _ldata.obs[attr]
    for attr in _ldata.obsm.keys():
        _adata.obsm[attr] = _ldata.obsm[attr]
    for attr in _ldata.uns.keys():
        _adata.uns[attr] = _ldata.uns[attr]
    for attr in _ldata.layers.keys():
        _adata.layers[attr] = _ldata.layers[attr]

    if _adata.shape[1] == _ldata.shape[1]:
        if _vars_identical(_adata, _ldata):
            for attr in _ldata.var.keys():
                _adata.var[attr] = _ldata.var[attr]
            for attr in _ldata.varm.keys():
                _adata.varm[attr] = _ldata.varm[attr]
        else:
            raise ValueError('Variable names are not identical.')

    return _adata if copy else None
"def",
"merge",
"(",
"adata",
",",
"ldata",
",",
"copy",
"=",
"True",
")",
":",
"if",
"'spliced'",
"in",
"ldata",
".",
"layers",
".",
"keys",
"(",
")",
"and",
"'initial_size_spliced'",
"not",
"in",
"ldata",
".",
"obs",
".",
"keys",
"(",
")",
":",
"... | Merges two annotated data matrices.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix (reference data set).
ldata: :class:`~anndata.AnnData`
Annotated data matrix (to be merged into adata).
Returns
-------
Returns a :class:`~anndata.AnnData` object | [
"Merges",
"two",
"annotated",
"data",
"matrices",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/read_load.py#L102-L158 | train | 22,954 |
def velocity_graph(data, vkey='velocity', xkey='Ms', tkey=None, basis=None, n_neighbors=None, n_recurse_neighbors=None,
                   random_neighbors_at_max=None, sqrt_transform=False, approx=False, copy=False):
    """Computes velocity graph based on cosine similarities.

    The cosine similarities are computed between velocities and potential cell state transitions.

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    vkey: `str` (default: `'velocity'`)
        Name of velocity estimates to be used.
    n_neighbors: `int` or `None` (default: None)
        Use fixed number of neighbors or do recursive neighbor search (if `None`).
    n_recurse_neighbors: `int` (default: 2)
        Number of recursions to be done for neighbors search.
    random_neighbors_at_max: `int` or `None` (default: `None`)
        If number of iterative neighbors for an individual cell is higher than this threshold,
        a random selection of such are chosen as reference neighbors.
    sqrt_transform: `bool` (default: `False`)
        Whether to variance-transform the cell states changes and velocities before computing cosine similarities.
    copy: `bool` (default: `False`)
        Return a copy instead of writing to adata.

    Returns
    -------
    Returns or updates `adata` with the attributes
    velocity_graph: `.uns`
        sparse matrix with transition probabilities
    """
    adata = data.copy() if copy else data

    # compute velocities first if they are not present yet
    if vkey not in adata.layers.keys():
        velocity(adata, vkey=vkey)

    vgraph = VelocityGraph(adata, vkey=vkey, xkey=xkey, tkey=tkey, basis=basis, n_neighbors=n_neighbors,
                           approx=approx, n_recurse_neighbors=n_recurse_neighbors,
                           random_neighbors_at_max=random_neighbors_at_max, sqrt_transform=sqrt_transform,
                           report=True)

    logg.info('computing velocity graph', r=True)
    vgraph.compute_cosines()

    # positive and negative cosine correlations are stored separately
    adata.uns[vkey + '_graph'] = vgraph.graph
    adata.uns[vkey + '_graph_neg'] = vgraph.graph_neg

    logg.info('    finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint("added \n    '" + vkey + "_graph', sparse matrix with cosine correlations (adata.uns)")

    return adata if copy else None
"def",
"velocity_graph",
"(",
"data",
",",
"vkey",
"=",
"'velocity'",
",",
"xkey",
"=",
"'Ms'",
",",
"tkey",
"=",
"None",
",",
"basis",
"=",
"None",
",",
"n_neighbors",
"=",
"None",
",",
"n_recurse_neighbors",
"=",
"None",
",",
"random_neighbors_at_max",
"... | Computes velocity graph based on cosine similarities.
The cosine similarities are computed between velocities and potential cell state transitions.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
n_neighbors: `int` or `None` (default: None)
Use fixed number of neighbors or do recursive neighbor search (if `None`).
n_recurse_neighbors: `int` (default: 2)
Number of recursions to be done for neighbors search.
random_neighbors_at_max: `int` or `None` (default: `None`)
If number of iterative neighbors for an individual cell is higher than this threshold,
a random selection of such are chosen as reference neighbors.
sqrt_transform: `bool` (default: `False`)
Whether to variance-transform the cell states changes and velocities before computing cosine similarities.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
velocity_graph: `.uns`
sparse matrix with transition probabilities | [
"Computes",
"velocity",
"graph",
"based",
"on",
"cosine",
"similarities",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/velocity_graph.py#L120-L168 | train | 22,955 |
def optimize_NxN(x, y, fit_offset=False, perc=None):
    """Just to compare with closed-form solution

    Fits per-column linear models y ~ gamma * x (+ offset) by numerical
    least-squares (L-BFGS-B), optionally restricted to extreme-quantile
    observations selected via `perc`.
    """
    weights = None
    if perc is not None:
        # without an offset only the upper percentile bound is used
        if not fit_offset and isinstance(perc, (list, tuple)):
            perc = perc[1]
        weights = get_weight(x, y, perc).astype(bool)
        if issparse(weights):
            weights = weights.A

    x = x.astype(np.float64)
    y = y.astype(np.float64)

    n_vars = x.shape[1]
    offset = np.zeros(n_vars)
    gamma = np.zeros(n_vars)

    for i in range(n_vars):
        if weights is None:
            xi, yi = x[:, i], y[:, i]
        else:
            mask = weights[:, i]
            xi, yi = x[:, i][mask], y[:, i][mask]
        if fit_offset:
            # m = (offset, gamma); offset constrained to be non-negative
            res = minimize(lambda m: np.sum((-yi + xi * m[1] + m[0]) ** 2), method="L-BFGS-B",
                           x0=(0, 0.1), bounds=[(0, None), (None, None)])
            offset[i], gamma[i] = res.x
        else:
            gamma[i] = minimize(lambda m: np.sum((-yi + xi * m) ** 2), x0=0.1, method="L-BFGS-B").x

    # failed fits are reported as zero
    offset[np.isnan(offset)] = 0
    gamma[np.isnan(gamma)] = 0
    return offset, gamma
"def",
"optimize_NxN",
"(",
"x",
",",
"y",
",",
"fit_offset",
"=",
"False",
",",
"perc",
"=",
"None",
")",
":",
"if",
"perc",
"is",
"not",
"None",
":",
"if",
"not",
"fit_offset",
"and",
"isinstance",
"(",
"perc",
",",
"(",
"list",
",",
"tuple",
")"... | Just to compare with closed-form solution | [
"Just",
"to",
"compare",
"with",
"closed",
"-",
"form",
"solution"
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/optimization.py#L55-L80 | train | 22,956 |
def velocity_confidence(data, vkey='velocity', copy=False):
    """Computes confidences of velocities.

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    vkey: `str` (default: `'velocity'`)
        Name of velocity estimates to be used.
    copy: `bool` (default: `False`)
        Return a copy instead of writing to adata.

    Returns
    -------
    Returns or updates `adata` with the attributes
    velocity_length: `.obs`
        Length of the velocity vectors for each individual cell
    velocity_confidence: `.obs`
        Confidence for each cell
    """
    adata = data.copy() if copy else data
    if vkey not in adata.layers.keys():
        raise ValueError(
            'You need to run `tl.velocity` first.')

    idx = np.array(adata.var[vkey + '_genes'].values, dtype=bool)
    # NOTE: a copy of the 'Ms' layer was previously taken here as well
    # but never used; removed to avoid the dead local and the extra copy.
    V = adata.layers[vkey][:, idx].copy()

    indices = get_indices(dist=adata.uns['neighbors']['distances'])[0]

    V -= V.mean(1)[:, None]  # center each cell's velocity vector
    V_norm = norm(V)
    R = np.zeros(adata.n_obs)

    for i in range(adata.n_obs):
        Vi_neighs = V[indices[i]]
        Vi_neighs -= Vi_neighs.mean(1)[:, None]
        # mean cosine similarity between cell i's velocity and its neighbors' velocities
        R[i] = np.mean(np.einsum('ij, j', Vi_neighs, V[i]) / (norm(Vi_neighs) * V_norm[i])[None, :])

    adata.obs[vkey + '_length'] = V_norm.round(2)
    adata.obs[vkey + '_confidence'] = R

    logg.hint('added \'' + vkey + '_confidence\' (adata.obs)')

    if vkey + '_confidence_transition' not in adata.obs.keys():
        velocity_confidence_transition(adata, vkey)

    return adata if copy else None
"def",
"velocity_confidence",
"(",
"data",
",",
"vkey",
"=",
"'velocity'",
",",
"copy",
"=",
"False",
")",
":",
"adata",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"copy",
"else",
"data",
"if",
"vkey",
"not",
"in",
"adata",
".",
"layers",
".",
"keys",... | Computes confidences of velocities.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
velocity_length: `.obs`
Length of the velocity vectors for each individual cell
velocity_confidence: `.obs`
Confidence for each cell | [
"Computes",
"confidences",
"of",
"velocities",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/velocity_confidence.py#L10-L56 | train | 22,957 |
def velocity_confidence_transition(data, vkey='velocity', scale=10, copy=False):
    """Computes confidences of velocity transitions.

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    vkey: `str` (default: `'velocity'`)
        Name of velocity estimates to be used.
    scale: `float` (default: 10)
        Scale parameter of gaussian kernel.
    copy: `bool` (default: `False`)
        Return a copy instead of writing to adata.

    Returns
    -------
    Returns or updates `adata` with the attributes
    velocity_confidence_transition: `.obs`
        Confidence of transition for each cell
    """
    adata = data.copy() if copy else data
    if vkey not in adata.layers.keys():
        raise ValueError(
            'You need to run `tl.velocity` first.')

    gene_subset = np.array(adata.var[vkey + '_genes'].values, dtype=bool)
    T = transition_matrix(adata, vkey=vkey, scale=scale)

    # predicted displacement under the transition matrix vs. current state
    Ms = adata.layers['Ms'][:, gene_subset]
    dX = T.dot(Ms) - Ms
    dX -= dX.mean(1)[:, None]

    V = adata.layers[vkey][:, gene_subset].copy()
    V -= V.mean(1)[:, None]

    # per-cell cosine similarity between predicted displacement and velocity
    adata.obs[vkey + '_confidence_transition'] = prod_sum_var(dX, V) / (norm(dX) * norm(V))

    logg.hint('added \'' + vkey + '_confidence_transition\' (adata.obs)')

    return adata if copy else None
logg.hint('added \'' + vkey + '_confidence_transition\' (adata.obs)')
return adata if copy else None | [
"def",
"velocity_confidence_transition",
"(",
"data",
",",
"vkey",
"=",
"'velocity'",
",",
"scale",
"=",
"10",
",",
"copy",
"=",
"False",
")",
":",
"adata",
"=",
"data",
".",
"copy",
"(",
")",
"if",
"copy",
"else",
"data",
"if",
"vkey",
"not",
"in",
... | Computes confidences of velocity transitions.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
scale: `float` (default: 10)
Scale parameter of gaussian kernel.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
velocity_confidence_transition: `.obs`
Confidence of transition for each cell | [
"Computes",
"confidences",
"of",
"velocity",
"transitions",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/velocity_confidence.py#L59-L96 | train | 22,958 |
theislab/scvelo | scvelo/tools/terminal_states.py | cell_fate | def cell_fate(data, groupby='clusters', disconnected_groups=None, self_transitions=False, n_neighbors=None, copy=False):
"""Computes individual cell endpoints
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
groupby: `str` (default: `'clusters'`)
Key to which to assign the fates.
disconnected_groups: list of `str` (default: `None`)
Which groups to treat as disconnected for fate assignment.
n_neighbors: `int` (default: `None`)
Number of neighbors to restrict transitions to.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata` with the attributes
cell_fate: `.obs`
most likely cell fate for each individual cell
cell_fate_confidence: `.obs`
confidence of transitioning to the assigned fate
"""
adata = data.copy() if copy else data
logg.info('computing cell fates', r=True)
n_neighbors = 10 if n_neighbors is None else n_neighbors
_adata = adata.copy()
vgraph = VelocityGraph(_adata, n_neighbors=n_neighbors, approx=True, n_recurse_neighbors=1)
vgraph.compute_cosines()
_adata.uns['velocity_graph'] = vgraph.graph
_adata.uns['velocity_graph_neg'] = vgraph.graph_neg
T = transition_matrix(_adata, self_transitions=self_transitions)
I = np.eye(_adata.n_obs)
fate = np.linalg.inv(I - T)
if issparse(T): fate = fate.A
cell_fates = np.array(_adata.obs[groupby][fate.argmax(1)])
if disconnected_groups is not None:
idx = _adata.obs[groupby].isin(disconnected_groups)
cell_fates[idx] = _adata.obs[groupby][idx]
adata.obs['cell_fate'] = cell_fates
adata.obs['cell_fate_confidence'] = fate.max(1) / fate.sum(1)
strings_to_categoricals(adata)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint(
'added\n'
' \'cell_fate\', most likely cell fate (adata.obs)\n'
' \'cell_fate_confidence\', confidence of transitioning to the assigned fate (adata.obs)')
return adata if copy else None | python | def cell_fate(data, groupby='clusters', disconnected_groups=None, self_transitions=False, n_neighbors=None, copy=False):
"""Computes individual cell endpoints
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
groupby: `str` (default: `'clusters'`)
Key to which to assign the fates.
disconnected_groups: list of `str` (default: `None`)
Which groups to treat as disconnected for fate assignment.
n_neighbors: `int` (default: `None`)
Number of neighbors to restrict transitions to.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata` with the attributes
cell_fate: `.obs`
most likely cell fate for each individual cell
cell_fate_confidence: `.obs`
confidence of transitioning to the assigned fate
"""
adata = data.copy() if copy else data
logg.info('computing cell fates', r=True)
n_neighbors = 10 if n_neighbors is None else n_neighbors
_adata = adata.copy()
vgraph = VelocityGraph(_adata, n_neighbors=n_neighbors, approx=True, n_recurse_neighbors=1)
vgraph.compute_cosines()
_adata.uns['velocity_graph'] = vgraph.graph
_adata.uns['velocity_graph_neg'] = vgraph.graph_neg
T = transition_matrix(_adata, self_transitions=self_transitions)
I = np.eye(_adata.n_obs)
fate = np.linalg.inv(I - T)
if issparse(T): fate = fate.A
cell_fates = np.array(_adata.obs[groupby][fate.argmax(1)])
if disconnected_groups is not None:
idx = _adata.obs[groupby].isin(disconnected_groups)
cell_fates[idx] = _adata.obs[groupby][idx]
adata.obs['cell_fate'] = cell_fates
adata.obs['cell_fate_confidence'] = fate.max(1) / fate.sum(1)
strings_to_categoricals(adata)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint(
'added\n'
' \'cell_fate\', most likely cell fate (adata.obs)\n'
' \'cell_fate_confidence\', confidence of transitioning to the assigned fate (adata.obs)')
return adata if copy else None | [
"def",
"cell_fate",
"(",
"data",
",",
"groupby",
"=",
"'clusters'",
",",
"disconnected_groups",
"=",
"None",
",",
"self_transitions",
"=",
"False",
",",
"n_neighbors",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"adata",
"=",
"data",
".",
"copy",
"... | Computes individual cell endpoints
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
groupby: `str` (default: `'clusters'`)
Key to which to assign the fates.
disconnected_groups: list of `str` (default: `None`)
Which groups to treat as disconnected for fate assignment.
n_neighbors: `int` (default: `None`)
Number of neighbors to restrict transitions to.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata` with the attributes
cell_fate: `.obs`
most likely cell fate for each individual cell
cell_fate_confidence: `.obs`
confidence of transitioning to the assigned fate | [
"Computes",
"individual",
"cell",
"endpoints"
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/terminal_states.py#L12-L64 | train | 22,959 |
theislab/scvelo | scvelo/preprocessing/moments.py | moments | def moments(data, n_neighbors=30, n_pcs=30, mode='connectivities', method='umap', metric='euclidean', use_rep=None,
recurse_neighbors=False, renormalize=False, copy=False):
"""Computes moments for velocity estimation.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
n_neighbors: `int` (default: 30)
Number of neighbors to use.
n_pcs: `int` (default: 30)
Number of principal components to use.
mode: `'connectivities'` or `'distances'` (default: `'connectivities'`)
Distance metric to use for moment computation.
renormalize: `bool` (default: `False`)
Renormalize the moments by total counts per cell to its median.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
Ms: `.layers`
dense matrix with first order moments of spliced counts.
Mu: `.layers`
dense matrix with first order moments of unspliced counts.
"""
adata = data.copy() if copy else data
if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys():
raise ValueError('Could not find spliced / unspliced counts.')
if any([not_yet_normalized(adata.layers[layer]) for layer in {'spliced', 'unspliced'}]):
normalize_per_cell(adata)
if 'neighbors' not in adata.uns.keys() or neighbors_to_be_recomputed(adata, n_neighbors=n_neighbors):
if use_rep is None: use_rep = 'X_pca'
neighbors(adata, n_neighbors=n_neighbors, use_rep=use_rep, n_pcs=n_pcs, method=method, metric=metric)
if mode not in adata.uns['neighbors']:
raise ValueError('mode can only be \'connectivities\' or \'distances\'')
logg.info('computing moments based on ' + str(mode), r=True)
connectivities = get_connectivities(adata, mode, n_neighbors=n_neighbors, recurse_neighbors=recurse_neighbors)
adata.layers['Ms'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['spliced'])).astype(np.float32).A
adata.layers['Mu'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['unspliced'])).astype(np.float32).A
if renormalize: normalize_per_cell(adata, layers={'Ms', 'Mu'}, enforce=True)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint(
'added \n'
' \'Ms\' and \'Mu\', moments of spliced/unspliced abundances (adata.layers)')
return adata if copy else None | python | def moments(data, n_neighbors=30, n_pcs=30, mode='connectivities', method='umap', metric='euclidean', use_rep=None,
recurse_neighbors=False, renormalize=False, copy=False):
"""Computes moments for velocity estimation.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
n_neighbors: `int` (default: 30)
Number of neighbors to use.
n_pcs: `int` (default: 30)
Number of principal components to use.
mode: `'connectivities'` or `'distances'` (default: `'connectivities'`)
Distance metric to use for moment computation.
renormalize: `bool` (default: `False`)
Renormalize the moments by total counts per cell to its median.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
Ms: `.layers`
dense matrix with first order moments of spliced counts.
Mu: `.layers`
dense matrix with first order moments of unspliced counts.
"""
adata = data.copy() if copy else data
if 'spliced' not in adata.layers.keys() or 'unspliced' not in adata.layers.keys():
raise ValueError('Could not find spliced / unspliced counts.')
if any([not_yet_normalized(adata.layers[layer]) for layer in {'spliced', 'unspliced'}]):
normalize_per_cell(adata)
if 'neighbors' not in adata.uns.keys() or neighbors_to_be_recomputed(adata, n_neighbors=n_neighbors):
if use_rep is None: use_rep = 'X_pca'
neighbors(adata, n_neighbors=n_neighbors, use_rep=use_rep, n_pcs=n_pcs, method=method, metric=metric)
if mode not in adata.uns['neighbors']:
raise ValueError('mode can only be \'connectivities\' or \'distances\'')
logg.info('computing moments based on ' + str(mode), r=True)
connectivities = get_connectivities(adata, mode, n_neighbors=n_neighbors, recurse_neighbors=recurse_neighbors)
adata.layers['Ms'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['spliced'])).astype(np.float32).A
adata.layers['Mu'] = csr_matrix.dot(connectivities, csr_matrix(adata.layers['unspliced'])).astype(np.float32).A
if renormalize: normalize_per_cell(adata, layers={'Ms', 'Mu'}, enforce=True)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint(
'added \n'
' \'Ms\' and \'Mu\', moments of spliced/unspliced abundances (adata.layers)')
return adata if copy else None | [
"def",
"moments",
"(",
"data",
",",
"n_neighbors",
"=",
"30",
",",
"n_pcs",
"=",
"30",
",",
"mode",
"=",
"'connectivities'",
",",
"method",
"=",
"'umap'",
",",
"metric",
"=",
"'euclidean'",
",",
"use_rep",
"=",
"None",
",",
"recurse_neighbors",
"=",
"Fal... | Computes moments for velocity estimation.
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
n_neighbors: `int` (default: 30)
Number of neighbors to use.
n_pcs: `int` (default: 30)
Number of principal components to use.
mode: `'connectivities'` or `'distances'` (default: `'connectivities'`)
Distance metric to use for moment computation.
renormalize: `bool` (default: `False`)
Renormalize the moments by total counts per cell to its median.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
Ms: `.layers`
dense matrix with first order moments of spliced counts.
Mu: `.layers`
dense matrix with first order moments of unspliced counts. | [
"Computes",
"moments",
"for",
"velocity",
"estimation",
"."
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/preprocessing/moments.py#L10-L61 | train | 22,960 |
theislab/scvelo | scvelo/tools/transition_matrix.py | transition_matrix | def transition_matrix(adata, vkey='velocity', basis=None, backward=False, self_transitions=True, scale=10, perc=None,
use_negative_cosines=False, weight_diffusion=0, scale_diffusion=1, weight_indirect_neighbors=None,
n_neighbors=None, vgraph=None):
"""Computes transition probabilities from velocity graph
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
basis: `str` or `None` (default: `None`)
Restrict transition to embedding if specified
backward: `bool` (default: `False`)
Whether to use the transition matrix to push forward (`False`) or to pull backward (`True`)
scale: `float` (default: 10)
Scale parameter of gaussian kernel.
weight_diffusion: `float` (default: 0)
Relative weight to be given to diffusion kernel (Brownian motion)
scale_diffusion: `float` (default: 1)
Scale of diffusion kernel.
Returns
-------
Returns sparse matrix with transition probabilities.
"""
if vkey+'_graph' not in adata.uns:
raise ValueError('You need to run `tl.velocity_graph` first to compute cosine correlations.')
graph = csr_matrix(adata.uns[vkey + '_graph']).copy() if vgraph is None else vgraph.copy()
if self_transitions:
confidence = graph.max(1).A.flatten()
ub = np.percentile(confidence, 98)
self_prob = np.clip(ub - confidence, 0, 1)
graph.setdiag(self_prob)
T = np.expm1(graph * scale) # equivalent to np.exp(graph.A * scale) - 1
if vkey + '_graph_neg' in adata.uns.keys():
graph_neg = adata.uns[vkey + '_graph_neg']
if use_negative_cosines:
T -= np.expm1(-graph_neg * scale)
else:
T += np.expm1(graph_neg * scale)
T.data += 1
# weight direct and indirect (recursed) neighbors
if 'neighbors' in adata.uns.keys() and weight_indirect_neighbors is not None and weight_indirect_neighbors < 1:
direct_neighbors = adata.uns['neighbors']['distances'] > 0
direct_neighbors.setdiag(1)
w = weight_indirect_neighbors
T = w * T + (1-w) * direct_neighbors.multiply(T)
if backward: T = T.T
T = normalize(T)
if n_neighbors is not None:
T = T.multiply(get_connectivities(adata, mode='distances', n_neighbors=n_neighbors, recurse_neighbors=True))
if perc is not None:
threshold = np.percentile(T.data, perc)
T.data[T.data < threshold] = 0
T.eliminate_zeros()
if 'X_' + str(basis) in adata.obsm.keys():
dists_emb = (T > 0).multiply(squareform(pdist(adata.obsm['X_' + basis])))
scale_diffusion *= dists_emb.data.mean()
diffusion_kernel = dists_emb.copy()
diffusion_kernel.data = np.exp(-.5 * dists_emb.data ** 2 / scale_diffusion ** 2)
T = T.multiply(diffusion_kernel) # combine velocity based kernel with diffusion based kernel
if 0 < weight_diffusion < 1: # add another diffusion kernel (Brownian motion - like)
diffusion_kernel.data = np.exp(-.5 * dists_emb.data ** 2 / (scale_diffusion/2) ** 2)
T = (1-weight_diffusion) * T + weight_diffusion * diffusion_kernel
T = normalize(T)
return T | python | def transition_matrix(adata, vkey='velocity', basis=None, backward=False, self_transitions=True, scale=10, perc=None,
use_negative_cosines=False, weight_diffusion=0, scale_diffusion=1, weight_indirect_neighbors=None,
n_neighbors=None, vgraph=None):
"""Computes transition probabilities from velocity graph
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
basis: `str` or `None` (default: `None`)
Restrict transition to embedding if specified
backward: `bool` (default: `False`)
Whether to use the transition matrix to push forward (`False`) or to pull backward (`True`)
scale: `float` (default: 10)
Scale parameter of gaussian kernel.
weight_diffusion: `float` (default: 0)
Relative weight to be given to diffusion kernel (Brownian motion)
scale_diffusion: `float` (default: 1)
Scale of diffusion kernel.
Returns
-------
Returns sparse matrix with transition probabilities.
"""
if vkey+'_graph' not in adata.uns:
raise ValueError('You need to run `tl.velocity_graph` first to compute cosine correlations.')
graph = csr_matrix(adata.uns[vkey + '_graph']).copy() if vgraph is None else vgraph.copy()
if self_transitions:
confidence = graph.max(1).A.flatten()
ub = np.percentile(confidence, 98)
self_prob = np.clip(ub - confidence, 0, 1)
graph.setdiag(self_prob)
T = np.expm1(graph * scale) # equivalent to np.exp(graph.A * scale) - 1
if vkey + '_graph_neg' in adata.uns.keys():
graph_neg = adata.uns[vkey + '_graph_neg']
if use_negative_cosines:
T -= np.expm1(-graph_neg * scale)
else:
T += np.expm1(graph_neg * scale)
T.data += 1
# weight direct and indirect (recursed) neighbors
if 'neighbors' in adata.uns.keys() and weight_indirect_neighbors is not None and weight_indirect_neighbors < 1:
direct_neighbors = adata.uns['neighbors']['distances'] > 0
direct_neighbors.setdiag(1)
w = weight_indirect_neighbors
T = w * T + (1-w) * direct_neighbors.multiply(T)
if backward: T = T.T
T = normalize(T)
if n_neighbors is not None:
T = T.multiply(get_connectivities(adata, mode='distances', n_neighbors=n_neighbors, recurse_neighbors=True))
if perc is not None:
threshold = np.percentile(T.data, perc)
T.data[T.data < threshold] = 0
T.eliminate_zeros()
if 'X_' + str(basis) in adata.obsm.keys():
dists_emb = (T > 0).multiply(squareform(pdist(adata.obsm['X_' + basis])))
scale_diffusion *= dists_emb.data.mean()
diffusion_kernel = dists_emb.copy()
diffusion_kernel.data = np.exp(-.5 * dists_emb.data ** 2 / scale_diffusion ** 2)
T = T.multiply(diffusion_kernel) # combine velocity based kernel with diffusion based kernel
if 0 < weight_diffusion < 1: # add another diffusion kernel (Brownian motion - like)
diffusion_kernel.data = np.exp(-.5 * dists_emb.data ** 2 / (scale_diffusion/2) ** 2)
T = (1-weight_diffusion) * T + weight_diffusion * diffusion_kernel
T = normalize(T)
return T | [
"def",
"transition_matrix",
"(",
"adata",
",",
"vkey",
"=",
"'velocity'",
",",
"basis",
"=",
"None",
",",
"backward",
"=",
"False",
",",
"self_transitions",
"=",
"True",
",",
"scale",
"=",
"10",
",",
"perc",
"=",
"None",
",",
"use_negative_cosines",
"=",
... | Computes transition probabilities from velocity graph
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name of velocity estimates to be used.
basis: `str` or `None` (default: `None`)
Restrict transition to embedding if specified
backward: `bool` (default: `False`)
Whether to use the transition matrix to push forward (`False`) or to pull backward (`True`)
scale: `float` (default: 10)
Scale parameter of gaussian kernel.
weight_diffusion: `float` (default: 0)
Relative weight to be given to diffusion kernel (Brownian motion)
scale_diffusion: `float` (default: 1)
Scale of diffusion kernel.
Returns
-------
Returns sparse matrix with transition probabilities. | [
"Computes",
"transition",
"probabilities",
"from",
"velocity",
"graph"
] | c7a96d70edfe705e86bf364434a9527d4fd8df11 | https://github.com/theislab/scvelo/blob/c7a96d70edfe705e86bf364434a9527d4fd8df11/scvelo/tools/transition_matrix.py#L9-L87 | train | 22,961 |
rochars/trade | trade/context.py | Context.apply | def apply(self):
"""Apply the rules of the context to its occurrences.
This method executes all the functions defined in
self.tasks in the order they are listed.
Every function that acts as a context task receives the
Context object itself as its only argument.
The contextualized occurrences are then stored in
Context.contextualized.
The original Occurrence instances are not modified.
"""
raw_operations = copy.deepcopy(self.occurrences)
for task in self.tasks:
task(self)
self.occurrences = raw_operations | python | def apply(self):
"""Apply the rules of the context to its occurrences.
This method executes all the functions defined in
self.tasks in the order they are listed.
Every function that acts as a context task receives the
Context object itself as its only argument.
The contextualized occurrences are then stored in
Context.contextualized.
The original Occurrence instances are not modified.
"""
raw_operations = copy.deepcopy(self.occurrences)
for task in self.tasks:
task(self)
self.occurrences = raw_operations | [
"def",
"apply",
"(",
"self",
")",
":",
"raw_operations",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"occurrences",
")",
"for",
"task",
"in",
"self",
".",
"tasks",
":",
"task",
"(",
"self",
")",
"self",
".",
"occurrences",
"=",
"raw_operations"
] | Apply the rules of the context to its occurrences.
This method executes all the functions defined in
self.tasks in the order they are listed.
Every function that acts as a context task receives the
Context object itself as its only argument.
The contextualized occurrences are then stored in
Context.contextualized.
The original Occurrence instances are not modified. | [
"Apply",
"the",
"rules",
"of",
"the",
"context",
"to",
"its",
"occurrences",
"."
] | 3b8d386e1394923919b7dc7a30dc93441558d5bc | https://github.com/rochars/trade/blob/3b8d386e1394923919b7dc7a30dc93441558d5bc/trade/context.py#L70-L87 | train | 22,962 |
rochars/trade | trade/occurrence.py | average_price | def average_price(quantity_1, price_1, quantity_2, price_2):
"""Calculates the average price between two asset states."""
return (quantity_1 * price_1 + quantity_2 * price_2) / \
(quantity_1 + quantity_2) | python | def average_price(quantity_1, price_1, quantity_2, price_2):
"""Calculates the average price between two asset states."""
return (quantity_1 * price_1 + quantity_2 * price_2) / \
(quantity_1 + quantity_2) | [
"def",
"average_price",
"(",
"quantity_1",
",",
"price_1",
",",
"quantity_2",
",",
"price_2",
")",
":",
"return",
"(",
"quantity_1",
"*",
"price_1",
"+",
"quantity_2",
"*",
"price_2",
")",
"/",
"(",
"quantity_1",
"+",
"quantity_2",
")"
] | Calculates the average price between two asset states. | [
"Calculates",
"the",
"average",
"price",
"between",
"two",
"asset",
"states",
"."
] | 3b8d386e1394923919b7dc7a30dc93441558d5bc | https://github.com/rochars/trade/blob/3b8d386e1394923919b7dc7a30dc93441558d5bc/trade/occurrence.py#L134-L137 | train | 22,963 |
rochars/trade | trade/occurrence.py | Occurrence.update_holder | def update_holder(self, holder):
"""Udpate the Holder state according to the occurrence.
This implementation is a example of how a Occurrence object
can update the Holder state; this method should be overriden
by classes that inherit from the Occurrence class.
This sample implementation simply update the quantity and the average
price of the Subject in the Holder's possession every time objects
from this class are passed to Holder.trade().
This sample implementation considers the following signature for
the Holder.state dict:
.. code:: python
{
"SUBJECT SYMBOL": {
"quantity": 0,
"value": 0
}
}
And the following signature for the Occurrance.details dict:
.. code:: python
{
"quantity": 0,
"value": 0
}
"""
subject_symbol = self.subject.symbol
# If the Holder already have a state regarding this Subject,
# update that state
if subject_symbol in holder.state:
# If the Holder have zero units of this subject, the average
# value paid/received for the subject is the value of the trade itself
if not holder.state[subject_symbol]['quantity']:
holder.state[subject_symbol]['value'] = self.details['value']
# If the Holder owns units of this subject then the average value
# paid/received for the subject may need to be updated with
# this occurrence details
# If the occurrence have the same sign as the quantity in the Holder
# state, a new average value needs to be calculated for the subject
elif same_sign(
holder.state[subject_symbol]['quantity'],
self.details['quantity']):
holder.state[subject_symbol]['value'] = average_price(
holder.state[subject_symbol]['quantity'],
holder.state[subject_symbol]['value'],
self.details['quantity'],
self.details['value']
)
# If the occurrence does not have the same sign of the quantity in the
# Holder state, then do other stuff.
# A trade app would normally implement some sort of profit/loss logic
# here.
# This sample implementation only checks if the average value
# of the subject needs to be updated and then update it as needed.
else:
if same_sign(
self.details['quantity'],
holder.state[subject_symbol]['quantity'] + self.details['quantity']):
holder.state[subject_symbol]['value'] = self.details['value']
# Update the quantity of the subject in the Holder's posession
holder.state[subject_symbol]['quantity'] += self.details['quantity']
# If the Holder don't have a state with this occurrence's Subject,
# then register this occurrence as the first state of the Subject
# in the Holder's possession
else:
holder.state[subject_symbol] = {
'quantity': self.details['quantity'],
'value': self.details['value']
}
# If the Holder knows about this Subject but don't have any unit
# of it, the paid value of the subject in the Holder state should
# be zero.
if not holder.state[subject_symbol]['quantity']:
holder.state[subject_symbol]['value'] = 0 | python | def update_holder(self, holder):
"""Udpate the Holder state according to the occurrence.
This implementation is a example of how a Occurrence object
can update the Holder state; this method should be overriden
by classes that inherit from the Occurrence class.
This sample implementation simply update the quantity and the average
price of the Subject in the Holder's possession every time objects
from this class are passed to Holder.trade().
This sample implementation considers the following signature for
the Holder.state dict:
.. code:: python
{
"SUBJECT SYMBOL": {
"quantity": 0,
"value": 0
}
}
And the following signature for the Occurrance.details dict:
.. code:: python
{
"quantity": 0,
"value": 0
}
"""
subject_symbol = self.subject.symbol
# If the Holder already have a state regarding this Subject,
# update that state
if subject_symbol in holder.state:
# If the Holder have zero units of this subject, the average
# value paid/received for the subject is the value of the trade itself
if not holder.state[subject_symbol]['quantity']:
holder.state[subject_symbol]['value'] = self.details['value']
# If the Holder owns units of this subject then the average value
# paid/received for the subject may need to be updated with
# this occurrence details
# If the occurrence have the same sign as the quantity in the Holder
# state, a new average value needs to be calculated for the subject
elif same_sign(
holder.state[subject_symbol]['quantity'],
self.details['quantity']):
holder.state[subject_symbol]['value'] = average_price(
holder.state[subject_symbol]['quantity'],
holder.state[subject_symbol]['value'],
self.details['quantity'],
self.details['value']
)
# If the occurrence does not have the same sign of the quantity in the
# Holder state, then do other stuff.
# A trade app would normally implement some sort of profit/loss logic
# here.
# This sample implementation only checks if the average value
# of the subject needs to be updated and then update it as needed.
else:
if same_sign(
self.details['quantity'],
holder.state[subject_symbol]['quantity'] + self.details['quantity']):
holder.state[subject_symbol]['value'] = self.details['value']
# Update the quantity of the subject in the Holder's posession
holder.state[subject_symbol]['quantity'] += self.details['quantity']
# If the Holder don't have a state with this occurrence's Subject,
# then register this occurrence as the first state of the Subject
# in the Holder's possession
else:
holder.state[subject_symbol] = {
'quantity': self.details['quantity'],
'value': self.details['value']
}
# If the Holder knows about this Subject but don't have any unit
# of it, the paid value of the subject in the Holder state should
# be zero.
if not holder.state[subject_symbol]['quantity']:
holder.state[subject_symbol]['value'] = 0 | [
"def",
"update_holder",
"(",
"self",
",",
"holder",
")",
":",
"subject_symbol",
"=",
"self",
".",
"subject",
".",
"symbol",
"# If the Holder already have a state regarding this Subject,",
"# update that state",
"if",
"subject_symbol",
"in",
"holder",
".",
"state",
":",
... | Udpate the Holder state according to the occurrence.
This implementation is a example of how a Occurrence object
can update the Holder state; this method should be overriden
by classes that inherit from the Occurrence class.
This sample implementation simply update the quantity and the average
price of the Subject in the Holder's possession every time objects
from this class are passed to Holder.trade().
This sample implementation considers the following signature for
the Holder.state dict:
.. code:: python
{
"SUBJECT SYMBOL": {
"quantity": 0,
"value": 0
}
}
And the following signature for the Occurrance.details dict:
.. code:: python
{
"quantity": 0,
"value": 0
} | [
"Udpate",
"the",
"Holder",
"state",
"according",
"to",
"the",
"occurrence",
"."
] | 3b8d386e1394923919b7dc7a30dc93441558d5bc | https://github.com/rochars/trade/blob/3b8d386e1394923919b7dc7a30dc93441558d5bc/trade/occurrence.py#L44-L132 | train | 22,964 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.fitter | def fitter(self, n=0, ftype="real", colfac=1.0e-8, lmfac=1.0e-3):
"""Create a sub-fitter.
The created sub-fitter can be used in the same way as a fitter
default fitter. This function returns an identification, which has to
be used in the `fid` argument of subsequent calls. The call can
specify the standard constructor arguments (`n`, `type`, `colfac`,
`lmfac`), or can specify them later in a :meth:`set` statement.
:param n: number of unknowns
:param ftype: type of solution
Allowed: real, complex, separable, asreal, conjugate
:param colfac: collinearity factor
:param lmfac: Levenberg-Marquardt factor
:param fid: the id of a sub-fitter
"""
fid = self._fitproxy.getid()
ftype = self._gettype(ftype)
n = len(self._fitids)
if 0 <= fid < n:
self._fitids[fid] = {}
elif fid == n:
self._fitids.append({})
else:
# shouldn't happen
raise RangeError("fit id out of range")
self.init(n=n, ftype=ftype, colfac=colfac, lmfac=lmfac, fid=fid)
return fid | python | def fitter(self, n=0, ftype="real", colfac=1.0e-8, lmfac=1.0e-3):
"""Create a sub-fitter.
The created sub-fitter can be used in the same way as a fitter
default fitter. This function returns an identification, which has to
be used in the `fid` argument of subsequent calls. The call can
specify the standard constructor arguments (`n`, `type`, `colfac`,
`lmfac`), or can specify them later in a :meth:`set` statement.
:param n: number of unknowns
:param ftype: type of solution
Allowed: real, complex, separable, asreal, conjugate
:param colfac: collinearity factor
:param lmfac: Levenberg-Marquardt factor
:param fid: the id of a sub-fitter
"""
fid = self._fitproxy.getid()
ftype = self._gettype(ftype)
n = len(self._fitids)
if 0 <= fid < n:
self._fitids[fid] = {}
elif fid == n:
self._fitids.append({})
else:
# shouldn't happen
raise RangeError("fit id out of range")
self.init(n=n, ftype=ftype, colfac=colfac, lmfac=lmfac, fid=fid)
return fid | [
"def",
"fitter",
"(",
"self",
",",
"n",
"=",
"0",
",",
"ftype",
"=",
"\"real\"",
",",
"colfac",
"=",
"1.0e-8",
",",
"lmfac",
"=",
"1.0e-3",
")",
":",
"fid",
"=",
"self",
".",
"_fitproxy",
".",
"getid",
"(",
")",
"ftype",
"=",
"self",
".",
"_getty... | Create a sub-fitter.
The created sub-fitter can be used in the same way as a fitter
default fitter. This function returns an identification, which has to
be used in the `fid` argument of subsequent calls. The call can
specify the standard constructor arguments (`n`, `type`, `colfac`,
`lmfac`), or can specify them later in a :meth:`set` statement.
:param n: number of unknowns
:param ftype: type of solution
Allowed: real, complex, separable, asreal, conjugate
:param colfac: collinearity factor
:param lmfac: Levenberg-Marquardt factor
:param fid: the id of a sub-fitter | [
"Create",
"a",
"sub",
"-",
"fitter",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L46-L74 | train | 22,965 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.done | def done(self, fid=0):
"""Terminates the fitserver."""
self._checkid(fid)
self._fitids[fid] = {}
self._fitproxy.done(fid) | python | def done(self, fid=0):
"""Terminates the fitserver."""
self._checkid(fid)
self._fitids[fid] = {}
self._fitproxy.done(fid) | [
"def",
"done",
"(",
"self",
",",
"fid",
"=",
"0",
")",
":",
"self",
".",
"_checkid",
"(",
"fid",
")",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"=",
"{",
"}",
"self",
".",
"_fitproxy",
".",
"done",
"(",
"fid",
")"
] | Terminates the fitserver. | [
"Terminates",
"the",
"fitserver",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L183-L187 | train | 22,966 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.reset | def reset(self, fid=0):
"""Reset the object's resources to its initialized state.
:param fid: the id of a sub-fitter
"""
self._checkid(fid)
self._fitids[fid]["solved"] = False
self._fitids[fid]["haserr"] = False
if not self._fitids[fid]["looped"]:
return self._fitproxy.reset(fid)
else:
self._fitids[fid]["looped"] = False
return True | python | def reset(self, fid=0):
"""Reset the object's resources to its initialized state.
:param fid: the id of a sub-fitter
"""
self._checkid(fid)
self._fitids[fid]["solved"] = False
self._fitids[fid]["haserr"] = False
if not self._fitids[fid]["looped"]:
return self._fitproxy.reset(fid)
else:
self._fitids[fid]["looped"] = False
return True | [
"def",
"reset",
"(",
"self",
",",
"fid",
"=",
"0",
")",
":",
"self",
".",
"_checkid",
"(",
"fid",
")",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"solved\"",
"]",
"=",
"False",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"haserr\"",
"]",... | Reset the object's resources to its initialized state.
:param fid: the id of a sub-fitter | [
"Reset",
"the",
"object",
"s",
"resources",
"to",
"its",
"initialized",
"state",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L189-L201 | train | 22,967 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.addconstraint | def addconstraint(self, x, y=0, fnct=None, fid=0):
"""Add constraint."""
self._checkid(fid)
i = 0
if "constraint" in self._fitids[fid]:
i = len(self._fitids[fid]["constraint"])
else:
self._fitids[fid]["constraint"] = {}
# dict key needs to be string
i = str(i)
self._fitids[fid]["constraint"][i] = {}
if isinstance(fnct, functional):
self._fitids[fid]["constraint"][i]["fnct"] = fnct.todict()
else:
self._fitids[fid]["constraint"][i]["fnct"] = \
functional("hyper", len(x)).todict()
self._fitids[fid]["constraint"][i]["x"] = [float(v) for v in x]
self._fitids[fid]["constraint"][i]["y"] = float(y)
six.print_(self._fitids[fid]["constraint"]) | python | def addconstraint(self, x, y=0, fnct=None, fid=0):
"""Add constraint."""
self._checkid(fid)
i = 0
if "constraint" in self._fitids[fid]:
i = len(self._fitids[fid]["constraint"])
else:
self._fitids[fid]["constraint"] = {}
# dict key needs to be string
i = str(i)
self._fitids[fid]["constraint"][i] = {}
if isinstance(fnct, functional):
self._fitids[fid]["constraint"][i]["fnct"] = fnct.todict()
else:
self._fitids[fid]["constraint"][i]["fnct"] = \
functional("hyper", len(x)).todict()
self._fitids[fid]["constraint"][i]["x"] = [float(v) for v in x]
self._fitids[fid]["constraint"][i]["y"] = float(y)
six.print_(self._fitids[fid]["constraint"]) | [
"def",
"addconstraint",
"(",
"self",
",",
"x",
",",
"y",
"=",
"0",
",",
"fnct",
"=",
"None",
",",
"fid",
"=",
"0",
")",
":",
"self",
".",
"_checkid",
"(",
"fid",
")",
"i",
"=",
"0",
"if",
"\"constraint\"",
"in",
"self",
".",
"_fitids",
"[",
"fi... | Add constraint. | [
"Add",
"constraint",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L216-L234 | train | 22,968 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.fitspoly | def fitspoly(self, n, x, y, sd=None, wt=1.0, fid=0):
"""Create normal equations from the specified condition equations, and
solve the resulting normal equations. It is in essence a combination.
The method expects that the properties of the fitter to be used have
been initialized or set (like the number of simultaneous solutions m
the type; factors). The main reason is to limit the number of
parameters on the one hand, and on the other hand not to depend
on the actual array structure to get the variables and type. Before
fitting the x-range is normalized to values less than 1 to cater for
large difference in x raised to large powers. Later a shift to make x
around zero will be added as well.
:param n: the order of the polynomial to solve for
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param fid: the id of the sub-fitter (numerical)
Example::
fit = fitserver()
x = N.arange(1,11) # we have values at 10 'x' values
y = 2. + 0.5*x - 0.1*x**2 # which are 2 +0.5x -0.1x^2
fit.fitspoly(3, x, y) # fit a 3-degree polynomial
print fit.solution(), fit.error() # show solution and their errors
"""
a = max(abs(max(x)), abs(min(x)))
if a == 0:
a = 1
a = 1.0 / a
b = NUM.power(a, range(n + 1))
if self.set(n=n + 1, fid=fid):
self.linear(poly(n), x * a, y, sd, wt, fid)
self._fitids[fid]["sol"] *= b
self._fitids[fid]["error"] *= b
return self.linear(poly(n), x, y, sd, wt, fid) | python | def fitspoly(self, n, x, y, sd=None, wt=1.0, fid=0):
"""Create normal equations from the specified condition equations, and
solve the resulting normal equations. It is in essence a combination.
The method expects that the properties of the fitter to be used have
been initialized or set (like the number of simultaneous solutions m
the type; factors). The main reason is to limit the number of
parameters on the one hand, and on the other hand not to depend
on the actual array structure to get the variables and type. Before
fitting the x-range is normalized to values less than 1 to cater for
large difference in x raised to large powers. Later a shift to make x
around zero will be added as well.
:param n: the order of the polynomial to solve for
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param fid: the id of the sub-fitter (numerical)
Example::
fit = fitserver()
x = N.arange(1,11) # we have values at 10 'x' values
y = 2. + 0.5*x - 0.1*x**2 # which are 2 +0.5x -0.1x^2
fit.fitspoly(3, x, y) # fit a 3-degree polynomial
print fit.solution(), fit.error() # show solution and their errors
"""
a = max(abs(max(x)), abs(min(x)))
if a == 0:
a = 1
a = 1.0 / a
b = NUM.power(a, range(n + 1))
if self.set(n=n + 1, fid=fid):
self.linear(poly(n), x * a, y, sd, wt, fid)
self._fitids[fid]["sol"] *= b
self._fitids[fid]["error"] *= b
return self.linear(poly(n), x, y, sd, wt, fid) | [
"def",
"fitspoly",
"(",
"self",
",",
"n",
",",
"x",
",",
"y",
",",
"sd",
"=",
"None",
",",
"wt",
"=",
"1.0",
",",
"fid",
"=",
"0",
")",
":",
"a",
"=",
"max",
"(",
"abs",
"(",
"max",
"(",
"x",
")",
")",
",",
"abs",
"(",
"min",
"(",
"x",
... | Create normal equations from the specified condition equations, and
solve the resulting normal equations. It is in essence a combination.
The method expects that the properties of the fitter to be used have
been initialized or set (like the number of simultaneous solutions m
the type; factors). The main reason is to limit the number of
parameters on the one hand, and on the other hand not to depend
on the actual array structure to get the variables and type. Before
fitting the x-range is normalized to values less than 1 to cater for
large difference in x raised to large powers. Later a shift to make x
around zero will be added as well.
:param n: the order of the polynomial to solve for
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param fid: the id of the sub-fitter (numerical)
Example::
fit = fitserver()
x = N.arange(1,11) # we have values at 10 'x' values
y = 2. + 0.5*x - 0.1*x**2 # which are 2 +0.5x -0.1x^2
fit.fitspoly(3, x, y) # fit a 3-degree polynomial
print fit.solution(), fit.error() # show solution and their errors | [
"Create",
"normal",
"equations",
"from",
"the",
"specified",
"condition",
"equations",
"and",
"solve",
"the",
"resulting",
"normal",
"equations",
".",
"It",
"is",
"in",
"essence",
"a",
"combination",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L240-L279 | train | 22,969 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.functional | def functional(self, fnct, x, y, sd=None, wt=1.0, mxit=50, fid=0):
"""Make a non-linear least squares solution.
This will make a non-linear least squares solution for the points
through the ordinates at the abscissa values, using the specified
`fnct`. Details can be found in the :meth:`linear` description.
:param fnct: the functional to fit
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param mxit: the maximum number of iterations
:param fid: the id of the sub-fitter (numerical)
"""
self._fit(fitfunc="functional", fnct=fnct, x=x, y=y, sd=sd, wt=wt,
mxit=mxit, fid=fid) | python | def functional(self, fnct, x, y, sd=None, wt=1.0, mxit=50, fid=0):
"""Make a non-linear least squares solution.
This will make a non-linear least squares solution for the points
through the ordinates at the abscissa values, using the specified
`fnct`. Details can be found in the :meth:`linear` description.
:param fnct: the functional to fit
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param mxit: the maximum number of iterations
:param fid: the id of the sub-fitter (numerical)
"""
self._fit(fitfunc="functional", fnct=fnct, x=x, y=y, sd=sd, wt=wt,
mxit=mxit, fid=fid) | [
"def",
"functional",
"(",
"self",
",",
"fnct",
",",
"x",
",",
"y",
",",
"sd",
"=",
"None",
",",
"wt",
"=",
"1.0",
",",
"mxit",
"=",
"50",
",",
"fid",
"=",
"0",
")",
":",
"self",
".",
"_fit",
"(",
"fitfunc",
"=",
"\"functional\"",
",",
"fnct",
... | Make a non-linear least squares solution.
This will make a non-linear least squares solution for the points
through the ordinates at the abscissa values, using the specified
`fnct`. Details can be found in the :meth:`linear` description.
:param fnct: the functional to fit
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param mxit: the maximum number of iterations
:param fid: the id of the sub-fitter (numerical) | [
"Make",
"a",
"non",
"-",
"linear",
"least",
"squares",
"solution",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L333-L351 | train | 22,970 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.linear | def linear(self, fnct, x, y, sd=None, wt=1.0, fid=0):
"""Make a linear least squares solution.
Makes a linear least squares solution for the points through the
ordinates at the x values, using the specified fnct. The x can be of
any dimension, depending on the number of arguments needed in the
functional evaluation. The values should be given in the order:
x0[1], x0[2], ..., x1[1], ..., xn[m] if there are n observations,
and m arguments. x should be a vector of m*n length; y (the
observations) a vector of length n.
:param fnct: the functional to fit
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param fid: the id of the sub-fitter (numerical)
"""
self._fit(fitfunc="linear", fnct=fnct, x=x, y=y, sd=sd, wt=wt, fid=fid) | python | def linear(self, fnct, x, y, sd=None, wt=1.0, fid=0):
"""Make a linear least squares solution.
Makes a linear least squares solution for the points through the
ordinates at the x values, using the specified fnct. The x can be of
any dimension, depending on the number of arguments needed in the
functional evaluation. The values should be given in the order:
x0[1], x0[2], ..., x1[1], ..., xn[m] if there are n observations,
and m arguments. x should be a vector of m*n length; y (the
observations) a vector of length n.
:param fnct: the functional to fit
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param fid: the id of the sub-fitter (numerical)
"""
self._fit(fitfunc="linear", fnct=fnct, x=x, y=y, sd=sd, wt=wt, fid=fid) | [
"def",
"linear",
"(",
"self",
",",
"fnct",
",",
"x",
",",
"y",
",",
"sd",
"=",
"None",
",",
"wt",
"=",
"1.0",
",",
"fid",
"=",
"0",
")",
":",
"self",
".",
"_fit",
"(",
"fitfunc",
"=",
"\"linear\"",
",",
"fnct",
"=",
"fnct",
",",
"x",
"=",
"... | Make a linear least squares solution.
Makes a linear least squares solution for the points through the
ordinates at the x values, using the specified fnct. The x can be of
any dimension, depending on the number of arguments needed in the
functional evaluation. The values should be given in the order:
x0[1], x0[2], ..., x1[1], ..., xn[m] if there are n observations,
and m arguments. x should be a vector of m*n length; y (the
observations) a vector of length n.
:param fnct: the functional to fit
:param x: the abscissa values
:param y: the ordinate values
:param sd: standard deviation of equations (one or more values used
cyclically)
:param wt: an optional alternate for `sd`
:param fid: the id of the sub-fitter (numerical) | [
"Make",
"a",
"linear",
"least",
"squares",
"solution",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L355-L375 | train | 22,971 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.constraint | def constraint(self, n=-1, fid=0):
"""Obtain the set of orthogonal equations that make the solution of
the rank deficient normal equations possible.
:param fid: the id of the sub-fitter (numerical)
"""
c = self._getval("constr", fid)
if n < 0 or n > self.deficiency(fid):
return c
else:
raise RuntimeError("Not yet implemented") | python | def constraint(self, n=-1, fid=0):
"""Obtain the set of orthogonal equations that make the solution of
the rank deficient normal equations possible.
:param fid: the id of the sub-fitter (numerical)
"""
c = self._getval("constr", fid)
if n < 0 or n > self.deficiency(fid):
return c
else:
raise RuntimeError("Not yet implemented") | [
"def",
"constraint",
"(",
"self",
",",
"n",
"=",
"-",
"1",
",",
"fid",
"=",
"0",
")",
":",
"c",
"=",
"self",
".",
"_getval",
"(",
"\"constr\"",
",",
"fid",
")",
"if",
"n",
"<",
"0",
"or",
"n",
">",
"self",
".",
"deficiency",
"(",
"fid",
")",
... | Obtain the set of orthogonal equations that make the solution of
the rank deficient normal equations possible.
:param fid: the id of the sub-fitter (numerical) | [
"Obtain",
"the",
"set",
"of",
"orthogonal",
"equations",
"that",
"make",
"the",
"solution",
"of",
"the",
"rank",
"deficient",
"normal",
"equations",
"possible",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L457-L468 | train | 22,972 |
casacore/python-casacore | casacore/fitting/fitting.py | fitserver.fitted | def fitted(self, fid=0):
"""Test if enough Levenberg-Marquardt loops have been done.
It returns True if no improvement possible.
:param fid: the id of the sub-fitter (numerical)
"""
self._checkid(fid)
return not (self._fitids[fid]["fit"] > 0
or self._fitids[fid]["fit"] < -0.001) | python | def fitted(self, fid=0):
"""Test if enough Levenberg-Marquardt loops have been done.
It returns True if no improvement possible.
:param fid: the id of the sub-fitter (numerical)
"""
self._checkid(fid)
return not (self._fitids[fid]["fit"] > 0
or self._fitids[fid]["fit"] < -0.001) | [
"def",
"fitted",
"(",
"self",
",",
"fid",
"=",
"0",
")",
":",
"self",
".",
"_checkid",
"(",
"fid",
")",
"return",
"not",
"(",
"self",
".",
"_fitids",
"[",
"fid",
"]",
"[",
"\"fit\"",
"]",
">",
"0",
"or",
"self",
".",
"_fitids",
"[",
"fid",
"]",... | Test if enough Levenberg-Marquardt loops have been done.
It returns True if no improvement possible.
:param fid: the id of the sub-fitter (numerical) | [
"Test",
"if",
"enough",
"Levenberg",
"-",
"Marquardt",
"loops",
"have",
"been",
"done",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L470-L479 | train | 22,973 |
casacore/python-casacore | casacore/measures/__init__.py | measures.set_data_path | def set_data_path(self, pth):
"""Set the location of the measures data directory.
:param pth: The absolute path to the measures data directory.
"""
if os.path.exists(pth):
if not os.path.exists(os.path.join(pth, 'data', 'geodetic')):
raise IOError("The given path doesn't contain a 'data' "
"subdirectory")
os.environ["AIPSPATH"] = "%s dummy dummy" % pth | python | def set_data_path(self, pth):
"""Set the location of the measures data directory.
:param pth: The absolute path to the measures data directory.
"""
if os.path.exists(pth):
if not os.path.exists(os.path.join(pth, 'data', 'geodetic')):
raise IOError("The given path doesn't contain a 'data' "
"subdirectory")
os.environ["AIPSPATH"] = "%s dummy dummy" % pth | [
"def",
"set_data_path",
"(",
"self",
",",
"pth",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"pth",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"pth",
",",
"'data'",
",",
"'geode... | Set the location of the measures data directory.
:param pth: The absolute path to the measures data directory. | [
"Set",
"the",
"location",
"of",
"the",
"measures",
"data",
"directory",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/measures/__init__.py#L91-L100 | train | 22,974 |
casacore/python-casacore | casacore/measures/__init__.py | measures.asbaseline | def asbaseline(self, pos):
"""Convert a position measure into a baseline measure. No actual
baseline is calculated, since operations can be done on positions,
with subtractions to obtain baselines at a later stage.
:param pos: a position measure
:returns: a baseline measure
"""
if not is_measure(pos) or pos['type'] not in ['position', 'baseline']:
raise TypeError('Argument is not a position/baseline measure')
if pos['type'] == 'position':
loc = self.measure(pos, 'itrf')
loc['type'] = 'baseline'
return self.measure(loc, 'j2000')
return pos | python | def asbaseline(self, pos):
"""Convert a position measure into a baseline measure. No actual
baseline is calculated, since operations can be done on positions,
with subtractions to obtain baselines at a later stage.
:param pos: a position measure
:returns: a baseline measure
"""
if not is_measure(pos) or pos['type'] not in ['position', 'baseline']:
raise TypeError('Argument is not a position/baseline measure')
if pos['type'] == 'position':
loc = self.measure(pos, 'itrf')
loc['type'] = 'baseline'
return self.measure(loc, 'j2000')
return pos | [
"def",
"asbaseline",
"(",
"self",
",",
"pos",
")",
":",
"if",
"not",
"is_measure",
"(",
"pos",
")",
"or",
"pos",
"[",
"'type'",
"]",
"not",
"in",
"[",
"'position'",
",",
"'baseline'",
"]",
":",
"raise",
"TypeError",
"(",
"'Argument is not a position/baseli... | Convert a position measure into a baseline measure. No actual
baseline is calculated, since operations can be done on positions,
with subtractions to obtain baselines at a later stage.
:param pos: a position measure
:returns: a baseline measure | [
"Convert",
"a",
"position",
"measure",
"into",
"a",
"baseline",
"measure",
".",
"No",
"actual",
"baseline",
"is",
"calculated",
"since",
"operations",
"can",
"be",
"done",
"on",
"positions",
"with",
"subtractions",
"to",
"obtain",
"baselines",
"at",
"a",
"late... | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/measures/__init__.py#L599-L614 | train | 22,975 |
casacore/python-casacore | casacore/measures/__init__.py | measures.getvalue | def getvalue(self, v):
"""
Return a list of quantities making up the measures' value.
:param v: a measure
"""
if not is_measure(v):
raise TypeError('Incorrect input type for getvalue()')
import re
rx = re.compile("m\d+")
out = []
keys = v.keys()[:]
keys.sort()
for key in keys:
if re.match(rx, key):
out.append(dq.quantity(v.get(key)))
return out | python | def getvalue(self, v):
"""
Return a list of quantities making up the measures' value.
:param v: a measure
"""
if not is_measure(v):
raise TypeError('Incorrect input type for getvalue()')
import re
rx = re.compile("m\d+")
out = []
keys = v.keys()[:]
keys.sort()
for key in keys:
if re.match(rx, key):
out.append(dq.quantity(v.get(key)))
return out | [
"def",
"getvalue",
"(",
"self",
",",
"v",
")",
":",
"if",
"not",
"is_measure",
"(",
"v",
")",
":",
"raise",
"TypeError",
"(",
"'Incorrect input type for getvalue()'",
")",
"import",
"re",
"rx",
"=",
"re",
".",
"compile",
"(",
"\"m\\d+\"",
")",
"out",
"="... | Return a list of quantities making up the measures' value.
:param v: a measure | [
"Return",
"a",
"list",
"of",
"quantities",
"making",
"up",
"the",
"measures",
"value",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/measures/__init__.py#L618-L634 | train | 22,976 |
casacore/python-casacore | casacore/measures/__init__.py | measures.doframe | def doframe(self, v):
"""This method will set the measure specified as part of a frame.
If conversion from one type to another is necessary (with the measure
function), the following frames should be set if one of the reference
types involved in the conversion is as in the following lists:
**Epoch**
* UTC
* TAI
* LAST - position
* LMST - position
* GMST1
* GAST
* UT1
* UT2
* TDT
* TCG
* TDB
* TCD
**Direction**
* J2000
* JMEAN - epoch
* JTRUE - epoch
* APP - epoch
* B1950
* BMEAN - epoch
* BTRUE - epoch
* GALACTIC
* HADEC - epoch, position
* AZEL - epoch, position
* SUPERGALACTIC
* ECLIPTIC
* MECLIPTIC - epoch
* TECLIPTIC - epoch
* PLANET - epoch, [position]
**Position**
* WGS84
* ITRF
**Radial Velocity**
* LSRK - direction
* LSRD - direction
* BARY - direction
* GEO - direction, epoch
* TOPO - direction, epoch, position
* GALACTO - direction
*
**Doppler**
* RADIO
* OPTICAL
* Z
* RATIO
* RELATIVISTIC
* BETA
* GAMMA
*
**Frequency**
* REST - direction, radialvelocity
* LSRK - direction
* LSRD - direction
* BARY - direction
* GEO - direction, epoch
* TOPO - direction, epoch, position
* GALACTO
"""
if not is_measure(v):
raise TypeError('Argument is not a measure')
if (v["type"] == "frequency" and v["refer"].lower() == "rest") \
or _measures.doframe(self, v):
self._framestack[v["type"]] = v
return True
return False | python | def doframe(self, v):
"""This method will set the measure specified as part of a frame.
If conversion from one type to another is necessary (with the measure
function), the following frames should be set if one of the reference
types involved in the conversion is as in the following lists:
**Epoch**
* UTC
* TAI
* LAST - position
* LMST - position
* GMST1
* GAST
* UT1
* UT2
* TDT
* TCG
* TDB
* TCD
**Direction**
* J2000
* JMEAN - epoch
* JTRUE - epoch
* APP - epoch
* B1950
* BMEAN - epoch
* BTRUE - epoch
* GALACTIC
* HADEC - epoch, position
* AZEL - epoch, position
* SUPERGALACTIC
* ECLIPTIC
* MECLIPTIC - epoch
* TECLIPTIC - epoch
* PLANET - epoch, [position]
**Position**
* WGS84
* ITRF
**Radial Velocity**
* LSRK - direction
* LSRD - direction
* BARY - direction
* GEO - direction, epoch
* TOPO - direction, epoch, position
* GALACTO - direction
*
**Doppler**
* RADIO
* OPTICAL
* Z
* RATIO
* RELATIVISTIC
* BETA
* GAMMA
*
**Frequency**
* REST - direction, radialvelocity
* LSRK - direction
* LSRD - direction
* BARY - direction
* GEO - direction, epoch
* TOPO - direction, epoch, position
* GALACTO
"""
if not is_measure(v):
raise TypeError('Argument is not a measure')
if (v["type"] == "frequency" and v["refer"].lower() == "rest") \
or _measures.doframe(self, v):
self._framestack[v["type"]] = v
return True
return False | [
"def",
"doframe",
"(",
"self",
",",
"v",
")",
":",
"if",
"not",
"is_measure",
"(",
"v",
")",
":",
"raise",
"TypeError",
"(",
"'Argument is not a measure'",
")",
"if",
"(",
"v",
"[",
"\"type\"",
"]",
"==",
"\"frequency\"",
"and",
"v",
"[",
"\"refer\"",
... | This method will set the measure specified as part of a frame.
If conversion from one type to another is necessary (with the measure
function), the following frames should be set if one of the reference
types involved in the conversion is as in the following lists:
**Epoch**
* UTC
* TAI
* LAST - position
* LMST - position
* GMST1
* GAST
* UT1
* UT2
* TDT
* TCG
* TDB
* TCD
**Direction**
* J2000
* JMEAN - epoch
* JTRUE - epoch
* APP - epoch
* B1950
* BMEAN - epoch
* BTRUE - epoch
* GALACTIC
* HADEC - epoch, position
* AZEL - epoch, position
* SUPERGALACTIC
* ECLIPTIC
* MECLIPTIC - epoch
* TECLIPTIC - epoch
* PLANET - epoch, [position]
**Position**
* WGS84
* ITRF
**Radial Velocity**
* LSRK - direction
* LSRD - direction
* BARY - direction
* GEO - direction, epoch
* TOPO - direction, epoch, position
* GALACTO - direction
*
**Doppler**
* RADIO
* OPTICAL
* Z
* RATIO
* RELATIVISTIC
* BETA
* GAMMA
*
**Frequency**
* REST - direction, radialvelocity
* LSRK - direction
* LSRD - direction
* BARY - direction
* GEO - direction, epoch
* TOPO - direction, epoch, position
* GALACTO | [
"This",
"method",
"will",
"set",
"the",
"measure",
"specified",
"as",
"part",
"of",
"a",
"frame",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/measures/__init__.py#L673-L756 | train | 22,977 |
casacore/python-casacore | casacore/tables/msutil.py | addImagingColumns | def addImagingColumns(msname, ack=True):
""" Add the columns to an MS needed for the casa imager.
It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT.
It also sets the CHANNEL_SELECTION keyword needed for the older casa
imagers.
A column is not added if already existing.
"""
# numpy is needed
import numpy as np
# Open the MS
t = table(msname, readonly=False, ack=False)
cnames = t.colnames()
# Get the description of the DATA column.
try:
cdesc = t.getcoldesc('DATA')
except:
raise ValueError('Column DATA does not exist')
# Determine if the DATA storage specification is tiled.
hasTiled = False
try:
dminfo = t.getdminfo("DATA")
if dminfo['TYPE'][:5] == 'Tiled':
hasTiled = True
except:
hasTiled = False
# Use TiledShapeStMan if needed.
if not hasTiled:
dminfo = {'TYPE': 'TiledShapeStMan',
'SPEC': {'DEFAULTTILESHAPE': [4, 32, 128]}}
# Add the columns(if not existing). Use the description of the DATA column.
if 'MODEL_DATA' in cnames:
six.print_("Column MODEL_DATA not added; it already exists")
else:
dminfo['NAME'] = 'modeldata'
cdesc['comment'] = 'The model data column'
t.addcols(maketabdesc(makecoldesc('MODEL_DATA', cdesc)), dminfo)
if ack:
six.print_("added column MODEL_DATA")
if 'CORRECTED_DATA' in cnames:
six.print_("Column CORRECTED_DATA not added; it already exists")
else:
dminfo['NAME'] = 'correcteddata'
cdesc['comment'] = 'The corrected data column'
t.addcols(maketabdesc(makecoldesc('CORRECTED_DATA', cdesc)), dminfo)
if ack:
six.print_("'added column CORRECTED_DATA")
if 'IMAGING_WEIGHT' in cnames:
six.print_("Column IMAGING_WEIGHT not added; it already exists")
else:
# Add IMAGING_WEIGHT which is 1-dim and has type float.
# It needs a shape, otherwise the CASA imager complains.
shp = []
if 'shape' in cdesc:
shp = cdesc['shape']
if len(shp) > 0:
shp = [shp[0]] # use nchan from shape
else:
shp = [t.getcell('DATA', 0).shape[0]] # use nchan from actual data
cd = makearrcoldesc('IMAGING_WEIGHT', 0, ndim=1, shape=shp,
valuetype='float')
dminfo = {'TYPE': 'TiledShapeStMan',
'SPEC': {'DEFAULTTILESHAPE': [32, 128]}}
dminfo['NAME'] = 'imagingweight'
t.addcols(maketabdesc(cd), dminfo)
if ack:
six.print_("added column IMAGING_WEIGHT")
# Add or overwrite keyword CHANNEL_SELECTION.
if 'CHANNEL_SELECTION' in t.colkeywordnames('MODEL_DATA'):
t.removecolkeyword('MODEL_DATA', 'CHANNEL_SELECTION')
# Define the CHANNEL_SELECTION keyword containing the channels of
# all spectral windows.
tspw = table(t.getkeyword('SPECTRAL_WINDOW'), ack=False)
nchans = tspw.getcol('NUM_CHAN')
chans = [[0, nch] for nch in nchans]
t.putcolkeyword('MODEL_DATA', 'CHANNEL_SELECTION', np.int32(chans))
if ack:
six.print_("defined keyword CHANNEL_SELECTION in column MODEL_DATA")
# Flush the table to make sure it is written.
t.flush() | python | def addImagingColumns(msname, ack=True):
""" Add the columns to an MS needed for the casa imager.
It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT.
It also sets the CHANNEL_SELECTION keyword needed for the older casa
imagers.
A column is not added if already existing.
"""
# numpy is needed
import numpy as np
# Open the MS
t = table(msname, readonly=False, ack=False)
cnames = t.colnames()
# Get the description of the DATA column.
try:
cdesc = t.getcoldesc('DATA')
except:
raise ValueError('Column DATA does not exist')
# Determine if the DATA storage specification is tiled.
hasTiled = False
try:
dminfo = t.getdminfo("DATA")
if dminfo['TYPE'][:5] == 'Tiled':
hasTiled = True
except:
hasTiled = False
# Use TiledShapeStMan if needed.
if not hasTiled:
dminfo = {'TYPE': 'TiledShapeStMan',
'SPEC': {'DEFAULTTILESHAPE': [4, 32, 128]}}
# Add the columns(if not existing). Use the description of the DATA column.
if 'MODEL_DATA' in cnames:
six.print_("Column MODEL_DATA not added; it already exists")
else:
dminfo['NAME'] = 'modeldata'
cdesc['comment'] = 'The model data column'
t.addcols(maketabdesc(makecoldesc('MODEL_DATA', cdesc)), dminfo)
if ack:
six.print_("added column MODEL_DATA")
if 'CORRECTED_DATA' in cnames:
six.print_("Column CORRECTED_DATA not added; it already exists")
else:
dminfo['NAME'] = 'correcteddata'
cdesc['comment'] = 'The corrected data column'
t.addcols(maketabdesc(makecoldesc('CORRECTED_DATA', cdesc)), dminfo)
if ack:
six.print_("'added column CORRECTED_DATA")
if 'IMAGING_WEIGHT' in cnames:
six.print_("Column IMAGING_WEIGHT not added; it already exists")
else:
# Add IMAGING_WEIGHT which is 1-dim and has type float.
# It needs a shape, otherwise the CASA imager complains.
shp = []
if 'shape' in cdesc:
shp = cdesc['shape']
if len(shp) > 0:
shp = [shp[0]] # use nchan from shape
else:
shp = [t.getcell('DATA', 0).shape[0]] # use nchan from actual data
cd = makearrcoldesc('IMAGING_WEIGHT', 0, ndim=1, shape=shp,
valuetype='float')
dminfo = {'TYPE': 'TiledShapeStMan',
'SPEC': {'DEFAULTTILESHAPE': [32, 128]}}
dminfo['NAME'] = 'imagingweight'
t.addcols(maketabdesc(cd), dminfo)
if ack:
six.print_("added column IMAGING_WEIGHT")
# Add or overwrite keyword CHANNEL_SELECTION.
if 'CHANNEL_SELECTION' in t.colkeywordnames('MODEL_DATA'):
t.removecolkeyword('MODEL_DATA', 'CHANNEL_SELECTION')
# Define the CHANNEL_SELECTION keyword containing the channels of
# all spectral windows.
tspw = table(t.getkeyword('SPECTRAL_WINDOW'), ack=False)
nchans = tspw.getcol('NUM_CHAN')
chans = [[0, nch] for nch in nchans]
t.putcolkeyword('MODEL_DATA', 'CHANNEL_SELECTION', np.int32(chans))
if ack:
six.print_("defined keyword CHANNEL_SELECTION in column MODEL_DATA")
# Flush the table to make sure it is written.
t.flush() | [
"def",
"addImagingColumns",
"(",
"msname",
",",
"ack",
"=",
"True",
")",
":",
"# numpy is needed",
"import",
"numpy",
"as",
"np",
"# Open the MS",
"t",
"=",
"table",
"(",
"msname",
",",
"readonly",
"=",
"False",
",",
"ack",
"=",
"False",
")",
"cnames",
"... | Add the columns to an MS needed for the casa imager.
It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT.
It also sets the CHANNEL_SELECTION keyword needed for the older casa
imagers.
A column is not added if already existing. | [
"Add",
"the",
"columns",
"to",
"an",
"MS",
"needed",
"for",
"the",
"casa",
"imager",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/msutil.py#L48-L128 | train | 22,978 |
casacore/python-casacore | casacore/tables/msutil.py | addDerivedMSCal | def addDerivedMSCal(msname):
""" Add the derived columns like HA to an MS or CalTable.
It adds the columns HA, HA1, HA2, PA1, PA2, LAST, LAST1, LAST2, AZEL1,
AZEL2, and UVW_J2000.
They are all bound to the DerivedMSCal virtual data manager.
It fails if one of the columns already exists.
"""
# Open the MS
t = table(msname, readonly=False, ack=False)
colnames = t.colnames()
# Check that the columns needed by DerivedMSCal are present.
# Note that ANTENNA2 and FEED2 are not required.
for col in ["TIME", "ANTENNA1", "FIELD_ID", "FEED1"]:
if col not in colnames:
raise ValueError("Columns " + colnames +
" should be present in table " + msname)
scols1 = ['HA', 'HA1', 'HA2', 'PA1', 'PA2']
scols2 = ['LAST', 'LAST1', 'LAST2']
acols1 = ['AZEL1', 'AZEL2']
acols2 = ['UVW_J2000']
descs = []
# Define the columns and their units.
for col in scols1:
descs.append(makescacoldesc(col, 0.,
keywords={"QuantumUnits": ["rad"]}))
for col in scols2:
descs.append(makescacoldesc(col, 0.,
keywords={"QuantumUnits": ["d"]}))
for col in acols1:
descs.append(makearrcoldesc(col, 0.,
keywords={"QuantumUnits": ["rad", "rad"]}))
for col in acols2:
descs.append(makearrcoldesc(col, 0.,
keywords={"QuantumUnits": ["m", "m", "m"],
"MEASINFO": {"Ref": "J2000",
"type": "uvw"}}))
# Add all columns using DerivedMSCal as data manager.
dminfo = {"TYPE": "DerivedMSCal", "NAME": "", "SPEC": {}}
t.addcols(maketabdesc(descs), dminfo)
# Flush the table to make sure it is written.
t.flush() | python | def addDerivedMSCal(msname):
""" Add the derived columns like HA to an MS or CalTable.
It adds the columns HA, HA1, HA2, PA1, PA2, LAST, LAST1, LAST2, AZEL1,
AZEL2, and UVW_J2000.
They are all bound to the DerivedMSCal virtual data manager.
It fails if one of the columns already exists.
"""
# Open the MS
t = table(msname, readonly=False, ack=False)
colnames = t.colnames()
# Check that the columns needed by DerivedMSCal are present.
# Note that ANTENNA2 and FEED2 are not required.
for col in ["TIME", "ANTENNA1", "FIELD_ID", "FEED1"]:
if col not in colnames:
raise ValueError("Columns " + colnames +
" should be present in table " + msname)
scols1 = ['HA', 'HA1', 'HA2', 'PA1', 'PA2']
scols2 = ['LAST', 'LAST1', 'LAST2']
acols1 = ['AZEL1', 'AZEL2']
acols2 = ['UVW_J2000']
descs = []
# Define the columns and their units.
for col in scols1:
descs.append(makescacoldesc(col, 0.,
keywords={"QuantumUnits": ["rad"]}))
for col in scols2:
descs.append(makescacoldesc(col, 0.,
keywords={"QuantumUnits": ["d"]}))
for col in acols1:
descs.append(makearrcoldesc(col, 0.,
keywords={"QuantumUnits": ["rad", "rad"]}))
for col in acols2:
descs.append(makearrcoldesc(col, 0.,
keywords={"QuantumUnits": ["m", "m", "m"],
"MEASINFO": {"Ref": "J2000",
"type": "uvw"}}))
# Add all columns using DerivedMSCal as data manager.
dminfo = {"TYPE": "DerivedMSCal", "NAME": "", "SPEC": {}}
t.addcols(maketabdesc(descs), dminfo)
# Flush the table to make sure it is written.
t.flush() | [
"def",
"addDerivedMSCal",
"(",
"msname",
")",
":",
"# Open the MS",
"t",
"=",
"table",
"(",
"msname",
",",
"readonly",
"=",
"False",
",",
"ack",
"=",
"False",
")",
"colnames",
"=",
"t",
".",
"colnames",
"(",
")",
"# Check that the columns needed by DerivedMSCa... | Add the derived columns like HA to an MS or CalTable.
It adds the columns HA, HA1, HA2, PA1, PA2, LAST, LAST1, LAST2, AZEL1,
AZEL2, and UVW_J2000.
They are all bound to the DerivedMSCal virtual data manager.
It fails if one of the columns already exists. | [
"Add",
"the",
"derived",
"columns",
"like",
"HA",
"to",
"an",
"MS",
"or",
"CalTable",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/msutil.py#L145-L189 | train | 22,979 |
casacore/python-casacore | casacore/tables/msutil.py | removeDerivedMSCal | def removeDerivedMSCal(msname):
""" Remove the derived columns like HA from an MS or CalTable.
It removes the columns using the data manager DerivedMSCal.
Such columns are HA, HA1, HA2, PA1, PA2, LAST, LAST1, LAST2, AZEL1,
AZEL2, and UVW_J2000.
It fails if one of the columns already exists.
"""
# Open the MS
t = table(msname, readonly=False, ack=False)
# Remove the columns stored as DerivedMSCal.
dmi = t.getdminfo()
for x in dmi.values():
if x['TYPE'] == 'DerivedMSCal':
t.removecols(x['COLUMNS'])
t.flush() | python | def removeDerivedMSCal(msname):
""" Remove the derived columns like HA from an MS or CalTable.
It removes the columns using the data manager DerivedMSCal.
Such columns are HA, HA1, HA2, PA1, PA2, LAST, LAST1, LAST2, AZEL1,
AZEL2, and UVW_J2000.
It fails if one of the columns already exists.
"""
# Open the MS
t = table(msname, readonly=False, ack=False)
# Remove the columns stored as DerivedMSCal.
dmi = t.getdminfo()
for x in dmi.values():
if x['TYPE'] == 'DerivedMSCal':
t.removecols(x['COLUMNS'])
t.flush() | [
"def",
"removeDerivedMSCal",
"(",
"msname",
")",
":",
"# Open the MS",
"t",
"=",
"table",
"(",
"msname",
",",
"readonly",
"=",
"False",
",",
"ack",
"=",
"False",
")",
"# Remove the columns stored as DerivedMSCal.",
"dmi",
"=",
"t",
".",
"getdminfo",
"(",
")",
... | Remove the derived columns like HA from an MS or CalTable.
It removes the columns using the data manager DerivedMSCal.
Such columns are HA, HA1, HA2, PA1, PA2, LAST, LAST1, LAST2, AZEL1,
AZEL2, and UVW_J2000.
It fails if one of the columns already exists. | [
"Remove",
"the",
"derived",
"columns",
"like",
"HA",
"from",
"an",
"MS",
"or",
"CalTable",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/msutil.py#L192-L210 | train | 22,980 |
casacore/python-casacore | casacore/tables/msutil.py | msregularize | def msregularize(msname, newname):
""" Regularize an MS
The output MS will be such that it has the same number of baselines
for each time stamp. Where needed fully flagged rows are added.
Possibly missing rows are written into a separate MS <newname>-add.
It is concatenated with the original MS and sorted in order of TIME,
DATADESC_ID, ANTENNA1,ANTENNA2 to form a new regular MS. Note that
the new MS references the input MS (it does not copy the data).
It means that changes made in the new MS are also made in the input MS.
If no rows were missing, the new MS is still created referencing the
input MS.
"""
# Find out all baselines.
t = table(msname)
t1 = t.sort('unique ANTENNA1,ANTENNA2')
nadded = 0
# Now iterate in time,band over the MS.
for tsub in t.iter(['TIME', 'DATA_DESC_ID']):
nmissing = t1.nrows() - tsub.nrows()
if nmissing < 0:
raise ValueError("A time/band chunk has too many rows")
if nmissing > 0:
# Rows needs to be added for the missing baselines.
ant1 = str(t1.getcol('ANTENNA1')).replace(' ', ',')
ant2 = str(t1.getcol('ANTENNA2')).replace(' ', ',')
ant1 = tsub.getcol('ANTENNA1')
ant2 = tsub.getcol('ANTENNA2')
t2 = taql('select from $t1 where !any(ANTENNA1 == $ant1 &&' +
' ANTENNA2 == $ant2)')
six.print_(nmissing, t1.nrows(), tsub.nrows(), t2.nrows())
if t2.nrows() != nmissing:
raise ValueError("A time/band chunk behaves strangely")
# If nothing added yet, create a new table.
# (which has to be reopened for read/write).
# Otherwise append to that new table.
if nadded == 0:
tnew = t2.copy(newname + "_add", deep=True)
tnew = table(newname + "_add", readonly=False)
else:
t2.copyrows(tnew)
# Set the correct time and band in the new rows.
tnew.putcell('TIME',
range(nadded, nadded + nmissing),
tsub.getcell('TIME', 0))
tnew.putcell('DATA_DESC_ID',
range(nadded, nadded + nmissing),
tsub.getcell('DATA_DESC_ID', 0))
nadded += nmissing
# Combine the existing table and new table.
if nadded > 0:
# First initialize data and flags in the added rows.
taql('update $tnew set DATA=0+0i')
taql('update $tnew set FLAG=True')
tcomb = table([t, tnew])
tcomb.rename(newname + '_adds')
tcombs = tcomb.sort('TIME,DATA_DESC_ID,ANTENNA1,ANTENNA2')
else:
tcombs = t.query(offset=0)
tcombs.rename(newname)
six.print_(newname, 'has been created; it references the original MS')
if nadded > 0:
six.print_(' and', newname + '_adds', 'containing', nadded, 'new rows')
else:
six.print_(' no rows needed to be added') | python | def msregularize(msname, newname):
""" Regularize an MS
The output MS will be such that it has the same number of baselines
for each time stamp. Where needed fully flagged rows are added.
Possibly missing rows are written into a separate MS <newname>-add.
It is concatenated with the original MS and sorted in order of TIME,
DATADESC_ID, ANTENNA1,ANTENNA2 to form a new regular MS. Note that
the new MS references the input MS (it does not copy the data).
It means that changes made in the new MS are also made in the input MS.
If no rows were missing, the new MS is still created referencing the
input MS.
"""
# Find out all baselines.
t = table(msname)
t1 = t.sort('unique ANTENNA1,ANTENNA2')
nadded = 0
# Now iterate in time,band over the MS.
for tsub in t.iter(['TIME', 'DATA_DESC_ID']):
nmissing = t1.nrows() - tsub.nrows()
if nmissing < 0:
raise ValueError("A time/band chunk has too many rows")
if nmissing > 0:
# Rows needs to be added for the missing baselines.
ant1 = str(t1.getcol('ANTENNA1')).replace(' ', ',')
ant2 = str(t1.getcol('ANTENNA2')).replace(' ', ',')
ant1 = tsub.getcol('ANTENNA1')
ant2 = tsub.getcol('ANTENNA2')
t2 = taql('select from $t1 where !any(ANTENNA1 == $ant1 &&' +
' ANTENNA2 == $ant2)')
six.print_(nmissing, t1.nrows(), tsub.nrows(), t2.nrows())
if t2.nrows() != nmissing:
raise ValueError("A time/band chunk behaves strangely")
# If nothing added yet, create a new table.
# (which has to be reopened for read/write).
# Otherwise append to that new table.
if nadded == 0:
tnew = t2.copy(newname + "_add", deep=True)
tnew = table(newname + "_add", readonly=False)
else:
t2.copyrows(tnew)
# Set the correct time and band in the new rows.
tnew.putcell('TIME',
range(nadded, nadded + nmissing),
tsub.getcell('TIME', 0))
tnew.putcell('DATA_DESC_ID',
range(nadded, nadded + nmissing),
tsub.getcell('DATA_DESC_ID', 0))
nadded += nmissing
# Combine the existing table and new table.
if nadded > 0:
# First initialize data and flags in the added rows.
taql('update $tnew set DATA=0+0i')
taql('update $tnew set FLAG=True')
tcomb = table([t, tnew])
tcomb.rename(newname + '_adds')
tcombs = tcomb.sort('TIME,DATA_DESC_ID,ANTENNA1,ANTENNA2')
else:
tcombs = t.query(offset=0)
tcombs.rename(newname)
six.print_(newname, 'has been created; it references the original MS')
if nadded > 0:
six.print_(' and', newname + '_adds', 'containing', nadded, 'new rows')
else:
six.print_(' no rows needed to be added') | [
"def",
"msregularize",
"(",
"msname",
",",
"newname",
")",
":",
"# Find out all baselines.",
"t",
"=",
"table",
"(",
"msname",
")",
"t1",
"=",
"t",
".",
"sort",
"(",
"'unique ANTENNA1,ANTENNA2'",
")",
"nadded",
"=",
"0",
"# Now iterate in time,band over the MS.",
... | Regularize an MS
The output MS will be such that it has the same number of baselines
for each time stamp. Where needed fully flagged rows are added.
Possibly missing rows are written into a separate MS <newname>-add.
It is concatenated with the original MS and sorted in order of TIME,
DATADESC_ID, ANTENNA1,ANTENNA2 to form a new regular MS. Note that
the new MS references the input MS (it does not copy the data).
It means that changes made in the new MS are also made in the input MS.
If no rows were missing, the new MS is still created referencing the
input MS. | [
"Regularize",
"an",
"MS"
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/msutil.py#L362-L429 | train | 22,981 |
casacore/python-casacore | casacore/tables/tablecolumn.py | tablecolumn._repr_html_ | def _repr_html_(self):
"""Give a nice representation of columns in notebooks."""
out="<table class='taqltable'>\n"
# Print column name (not if it is auto-generated)
if not(self.name()[:4]=="Col_"):
out+="<tr>"
out+="<th><b>"+self.name()+"</b></th>"
out+="</tr>"
cropped=False
rowcount=0
colkeywords=self.getkeywords()
for row in self:
out +="\n<tr>"
out += "<td>" + _format_cell(row, colkeywords) + "</td>\n"
out += "</tr>\n"
rowcount+=1
out+="\n"
if rowcount>=20:
cropped=True
break
if out[-2:]=="\n\n":
out=out[:-1]
out+="</table>"
if cropped:
out+="<p style='text-align:center'>("+str(self.nrows()-20)+" more rows)</p>\n"
return out | python | def _repr_html_(self):
"""Give a nice representation of columns in notebooks."""
out="<table class='taqltable'>\n"
# Print column name (not if it is auto-generated)
if not(self.name()[:4]=="Col_"):
out+="<tr>"
out+="<th><b>"+self.name()+"</b></th>"
out+="</tr>"
cropped=False
rowcount=0
colkeywords=self.getkeywords()
for row in self:
out +="\n<tr>"
out += "<td>" + _format_cell(row, colkeywords) + "</td>\n"
out += "</tr>\n"
rowcount+=1
out+="\n"
if rowcount>=20:
cropped=True
break
if out[-2:]=="\n\n":
out=out[:-1]
out+="</table>"
if cropped:
out+="<p style='text-align:center'>("+str(self.nrows()-20)+" more rows)</p>\n"
return out | [
"def",
"_repr_html_",
"(",
"self",
")",
":",
"out",
"=",
"\"<table class='taqltable'>\\n\"",
"# Print column name (not if it is auto-generated)",
"if",
"not",
"(",
"self",
".",
"name",
"(",
")",
"[",
":",
"4",
"]",
"==",
"\"Col_\"",
")",
":",
"out",
"+=",
"\"<... | Give a nice representation of columns in notebooks. | [
"Give",
"a",
"nice",
"representation",
"of",
"columns",
"in",
"notebooks",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tablecolumn.py#L305-L336 | train | 22,982 |
casacore/python-casacore | casacore/images/coordinates.py | coordinatesystem._get_coordinatenames | def _get_coordinatenames(self):
"""Create ordered list of coordinate names
"""
validnames = ("direction", "spectral", "linear", "stokes", "tabular")
self._names = [""] * len(validnames)
n = 0
for key in self._csys.keys():
for name in validnames:
if key.startswith(name):
idx = int(key[len(name):])
self._names[idx] = name
n += 1
# reverse as we are c order in python
self._names = self._names[:n][::-1]
if len(self._names) == 0:
raise LookupError("Coordinate record doesn't contain valid coordinates") | python | def _get_coordinatenames(self):
"""Create ordered list of coordinate names
"""
validnames = ("direction", "spectral", "linear", "stokes", "tabular")
self._names = [""] * len(validnames)
n = 0
for key in self._csys.keys():
for name in validnames:
if key.startswith(name):
idx = int(key[len(name):])
self._names[idx] = name
n += 1
# reverse as we are c order in python
self._names = self._names[:n][::-1]
if len(self._names) == 0:
raise LookupError("Coordinate record doesn't contain valid coordinates") | [
"def",
"_get_coordinatenames",
"(",
"self",
")",
":",
"validnames",
"=",
"(",
"\"direction\"",
",",
"\"spectral\"",
",",
"\"linear\"",
",",
"\"stokes\"",
",",
"\"tabular\"",
")",
"self",
".",
"_names",
"=",
"[",
"\"\"",
"]",
"*",
"len",
"(",
"validnames",
... | Create ordered list of coordinate names | [
"Create",
"ordered",
"list",
"of",
"coordinate",
"names"
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/coordinates.py#L71-L87 | train | 22,983 |
casacore/python-casacore | casacore/images/coordinates.py | directioncoordinate.set_projection | def set_projection(self, val):
"""Set the projection of the given axis in this coordinate.
The known projections are SIN, ZEA, TAN, NCP, AIT, ZEA
"""
knownproj = ["SIN", "ZEA", "TAN", "NCP", "AIT", "ZEA"] # etc
assert val.upper() in knownproj
self._coord["projection"] = val.upper() | python | def set_projection(self, val):
"""Set the projection of the given axis in this coordinate.
The known projections are SIN, ZEA, TAN, NCP, AIT, ZEA
"""
knownproj = ["SIN", "ZEA", "TAN", "NCP", "AIT", "ZEA"] # etc
assert val.upper() in knownproj
self._coord["projection"] = val.upper() | [
"def",
"set_projection",
"(",
"self",
",",
"val",
")",
":",
"knownproj",
"=",
"[",
"\"SIN\"",
",",
"\"ZEA\"",
",",
"\"TAN\"",
",",
"\"NCP\"",
",",
"\"AIT\"",
",",
"\"ZEA\"",
"]",
"# etc",
"assert",
"val",
".",
"upper",
"(",
")",
"in",
"knownproj",
"sel... | Set the projection of the given axis in this coordinate.
The known projections are SIN, ZEA, TAN, NCP, AIT, ZEA | [
"Set",
"the",
"projection",
"of",
"the",
"given",
"axis",
"in",
"this",
"coordinate",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/coordinates.py#L250-L257 | train | 22,984 |
casacore/python-casacore | casacore/tables/tableutil.py | tablefromascii | def tablefromascii(tablename, asciifile,
headerfile='',
autoheader=False, autoshape=[],
columnnames=[], datatypes=[],
sep=' ',
commentmarker='',
firstline=1, lastline=-1,
readonly=True,
lockoptions='default', ack=True):
"""Create a table from an ASCII file.
Create a table from a file in ASCII format. Columnar data as well as
table and column keywords may be specified.
Once the table is created from the ASCII data, it is opened in the
specified mode and a table object is returned.
The table columns are filled from a file containing the data values
separated by a separator (one line per table row). The default
separator is a blank. Blanks before and after the separator are ignored.
If a non-blank separator is used, values can be empty. Such values
default to 0, empty string, or F depending on the data type. E.g.
1,,2, has 4 values of which the 2nd and 4th are empty and default to 0.
Similarly if fewer values are given than needed, the missing values
get the default value.
Either the data format can be explicitly specified or it can be found
automatically. The former gives more control in ambiguous situations.
Both scalar and array columns can be generated from the ASCII input.
The format string determines the type and optional shape.
It is possible to give the column names and their data types in
various ways:
- Using 2 header lines (as described below) as the first two lines
in the data file or in a separate header file. This is the default way.
- Derive them automatically from the data (`autoheader=True`).
- Using the arguments `columnnames` and
`datatypes` (as non-empty vectors of strings).
It implies (`autoheader=False`). The data types should be
given in the same way as done in headers.
In automatic mode (`autoheader=True`) the first line
of the ASCII data is analyzed
to deduce the data types. Only the types I, D, and A can be
recognized. A number without decimal point or exponent is I (integer),
otherwise it is D (double). Any other string is A (string).
Note that a number may contain a leading sign (+ or -).
The `autoshape` argument can be used to specify if the input
should be stored as multiple scalars (the default) or as a single
array. In the latter case one axis in the shape can be defined as
variable length by giving it the value 0. It means that the actual
array shape in a row is determined by the number of values in the
corresponding input line.
Columns get the names `Column1`, `Column2`, etc..
For example:
1. `autoshape=[]` (which is the default) means that all values
are to be stored as scalar columns.
2. `autoshape=0` means that all values in a row are to be stored as
a variable length vector.
3. `autoshape=10` defines a fixed length vector. If an input
line contains less than 10 values, the vector is filled with default
values. If more than 10 values, the latter values are ignored.
4. `autoshape=[5,0]` defines a 2-dim array of which the 2nd axis is
variable. Note that if an input line does not contain a multiple of 5
values, the array is filled with default values.
If the format of the table is explicitly specified, it has to be done
either in the first two lines of the data file (named by the
argument filename), or in a separate header file (named by the
argument headerfile). In both forms, table keywords may also be
specified before the column definitions.
The column names and types can be described by two lines:
1. The first line contains the names of the columns.
These names may be enclosed in quotes (either single or double).
2. The second line contains the data type and optionally the shape
of each column. Valid types are:
- S for Short data
- I for Integer data
- R for Real data
- D for Double Precision data
- X for Complex data (Real followed by Imaginary)
- Z for Complex data (Amplitude then Phase)
- DX for Double Precision Complex data (Real followed by Imaginary)
- DZ for Double Precision Complex data (Amplitude then Phase)
- A for ASCII data (a value must be enclosed in single or double quotes
if it contains whitespace)
- B for Boolean data (False are empty string, 0, or any string
starting with F, f, N, or n).
If a column is an array, the shape has to be given after the data type
without any whitespace. E.g. `I10` defines an integer vector
of length 10. `A2,5` defines a 2-dim string array with shape
[2,5]. Note that `I` is not the same as `I1` as the
first one defines a scalar and the other one a vector with length 1.
The last column can have one variable length axis denoted by the value 0.
It "consumes" the remainder of the input line.
If the argument headerfile is set then the header information is
read from that file instead of the first lines of the data file.
To give a simple example of the form where the header information
is located at the top of the data file::
COLI COLF COLD COLX COLZ COLS
I R D X Z A
1 1.1 1.11 1.12 1.13 1.14 1.15 Str1
10 11 12 13 14 15 16 ""
Note that a complex number consists of 2 numbers.
Also note that an empty string can be given.
Let us now give an example of a separate header file that one might use to
get interferometer data into casacore::
U V W TIME ANT1 ANT2 DATA
R R R D I I X1,0
The data file would then look like::
124.011 54560.0 3477.1 43456789.0990 1 2 4.327 -0.1132
34561.0 45629.3 3900.5 43456789.0990 1 3 5.398 0.4521
Note that the DATA column is defined as a 2-dim array of 1
correlation and a variable number of channels, so the actual number of
channels is determined by the input. In this example both rows will
have 1 channel (note that a complex value contains 2 values).
Tables may have keywords in addition to the columns. The keywords
are useful for holding information that is global to the entire
table (such as author, revision, history, etc.).
The keywords in the header definitions must preceed the column descriptions.
They must be enclosed between a line that starts with ".key..." and
a line that starts with ".endkey..." (where ... can be anything).
A table keywordset and column keywordsets can be specified.
The latter can be specified by specifying the column name after
the .keywords string.
Between these two lines each line should contain the following:
- The keyword name, e.g., ANYKEY
- The datatype and optional shape of the keyword
(cf. list of valid types above)
- The value or values for the keyword (the keyword may contain
a scalar or an array of values). e.g., 3.14159 21.78945
Thus to continue the example above, one might wish to add keywords
as follows::
.keywords
DATE A "97/1/16"
REVISION D 2.01
AUTHOR A "Tim Cornwell"
INSTRUMENT A "VLA"
.endkeywords
.keywords TIME
UNIT A "s"
.endkeywords
U V W TIME ANT1 ANT2 DATA
R R R D I I X1,0
Similarly to the column format string, the keyword formats can also
contain shape information. The only difference is that if no shape is
given, a keyword can have multiple values (making it a vector).
It is possible to ignore comment lines in the header and data file
by giving the `commentmarker`. It indicates that lines
starting with the given marker are ignored. Note that the marker can
be a regular expression (e.g. `' *//'` tells that lines starting
with // and optionally preceeded by blanks have to be ignored).
With the arguments `firstline` and `lastline` one can
specify which lines have to be taken from the input file. A negative value
means 1 for `firstline` or end-of-file for `lastline`.
Note that if the headers and data are combined in one file,
these line arguments apply to the whole file. If headers and data are in
separate files, these line arguments apply to the data file only.
Also note that ignored comment lines are counted, thus are used to
determine which lines are in the line range.
The number of rows is determined by the number of lines read from the data
file.
"""
import os.path
filename = os.path.expandvars(asciifile)
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
s = "File '%s' not found" % (filename)
raise IOError(s)
if headerfile != '':
filename = os.path.expandvars(headerfile)
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
s = "File '%s' not found" % (filename)
raise IOError(s)
tab = table(asciifile, headerfile, tablename, autoheader, autoshape,
sep, commentmarker, firstline, lastline,
_columnnames=columnnames, _datatypes=datatypes, _oper=1)
six.print_('Input format: [' + tab._getasciiformat() + ']')
# Close table and reopen it in correct way.
tab = 0
return table(tablename, readonly=readonly, lockoptions=lockoptions,
ack=ack) | python | def tablefromascii(tablename, asciifile,
headerfile='',
autoheader=False, autoshape=[],
columnnames=[], datatypes=[],
sep=' ',
commentmarker='',
firstline=1, lastline=-1,
readonly=True,
lockoptions='default', ack=True):
"""Create a table from an ASCII file.
Create a table from a file in ASCII format. Columnar data as well as
table and column keywords may be specified.
Once the table is created from the ASCII data, it is opened in the
specified mode and a table object is returned.
The table columns are filled from a file containing the data values
separated by a separator (one line per table row). The default
separator is a blank. Blanks before and after the separator are ignored.
If a non-blank separator is used, values can be empty. Such values
default to 0, empty string, or F depending on the data type. E.g.
1,,2, has 4 values of which the 2nd and 4th are empty and default to 0.
Similarly if fewer values are given than needed, the missing values
get the default value.
Either the data format can be explicitly specified or it can be found
automatically. The former gives more control in ambiguous situations.
Both scalar and array columns can be generated from the ASCII input.
The format string determines the type and optional shape.
It is possible to give the column names and their data types in
various ways:
- Using 2 header lines (as described below) as the first two lines
in the data file or in a separate header file. This is the default way.
- Derive them automatically from the data (`autoheader=True`).
- Using the arguments `columnnames` and
`datatypes` (as non-empty vectors of strings).
It implies (`autoheader=False`). The data types should be
given in the same way as done in headers.
In automatic mode (`autoheader=True`) the first line
of the ASCII data is analyzed
to deduce the data types. Only the types I, D, and A can be
recognized. A number without decimal point or exponent is I (integer),
otherwise it is D (double). Any other string is A (string).
Note that a number may contain a leading sign (+ or -).
The `autoshape` argument can be used to specify if the input
should be stored as multiple scalars (the default) or as a single
array. In the latter case one axis in the shape can be defined as
variable length by giving it the value 0. It means that the actual
array shape in a row is determined by the number of values in the
corresponding input line.
Columns get the names `Column1`, `Column2`, etc..
For example:
1. `autoshape=[]` (which is the default) means that all values
are to be stored as scalar columns.
2. `autoshape=0` means that all values in a row are to be stored as
a variable length vector.
3. `autoshape=10` defines a fixed length vector. If an input
line contains less than 10 values, the vector is filled with default
values. If more than 10 values, the latter values are ignored.
4. `autoshape=[5,0]` defines a 2-dim array of which the 2nd axis is
variable. Note that if an input line does not contain a multiple of 5
values, the array is filled with default values.
If the format of the table is explicitly specified, it has to be done
either in the first two lines of the data file (named by the
argument filename), or in a separate header file (named by the
argument headerfile). In both forms, table keywords may also be
specified before the column definitions.
The column names and types can be described by two lines:
1. The first line contains the names of the columns.
These names may be enclosed in quotes (either single or double).
2. The second line contains the data type and optionally the shape
of each column. Valid types are:
- S for Short data
- I for Integer data
- R for Real data
- D for Double Precision data
- X for Complex data (Real followed by Imaginary)
- Z for Complex data (Amplitude then Phase)
- DX for Double Precision Complex data (Real followed by Imaginary)
- DZ for Double Precision Complex data (Amplitude then Phase)
- A for ASCII data (a value must be enclosed in single or double quotes
if it contains whitespace)
- B for Boolean data (False are empty string, 0, or any string
starting with F, f, N, or n).
If a column is an array, the shape has to be given after the data type
without any whitespace. E.g. `I10` defines an integer vector
of length 10. `A2,5` defines a 2-dim string array with shape
[2,5]. Note that `I` is not the same as `I1` as the
first one defines a scalar and the other one a vector with length 1.
The last column can have one variable length axis denoted by the value 0.
It "consumes" the remainder of the input line.
If the argument headerfile is set then the header information is
read from that file instead of the first lines of the data file.
To give a simple example of the form where the header information
is located at the top of the data file::
COLI COLF COLD COLX COLZ COLS
I R D X Z A
1 1.1 1.11 1.12 1.13 1.14 1.15 Str1
10 11 12 13 14 15 16 ""
Note that a complex number consists of 2 numbers.
Also note that an empty string can be given.
Let us now give an example of a separate header file that one might use to
get interferometer data into casacore::
U V W TIME ANT1 ANT2 DATA
R R R D I I X1,0
The data file would then look like::
124.011 54560.0 3477.1 43456789.0990 1 2 4.327 -0.1132
34561.0 45629.3 3900.5 43456789.0990 1 3 5.398 0.4521
Note that the DATA column is defined as a 2-dim array of 1
correlation and a variable number of channels, so the actual number of
channels is determined by the input. In this example both rows will
have 1 channel (note that a complex value contains 2 values).
Tables may have keywords in addition to the columns. The keywords
are useful for holding information that is global to the entire
table (such as author, revision, history, etc.).
The keywords in the header definitions must preceed the column descriptions.
They must be enclosed between a line that starts with ".key..." and
a line that starts with ".endkey..." (where ... can be anything).
A table keywordset and column keywordsets can be specified.
The latter can be specified by specifying the column name after
the .keywords string.
Between these two lines each line should contain the following:
- The keyword name, e.g., ANYKEY
- The datatype and optional shape of the keyword
(cf. list of valid types above)
- The value or values for the keyword (the keyword may contain
a scalar or an array of values). e.g., 3.14159 21.78945
Thus to continue the example above, one might wish to add keywords
as follows::
.keywords
DATE A "97/1/16"
REVISION D 2.01
AUTHOR A "Tim Cornwell"
INSTRUMENT A "VLA"
.endkeywords
.keywords TIME
UNIT A "s"
.endkeywords
U V W TIME ANT1 ANT2 DATA
R R R D I I X1,0
Similarly to the column format string, the keyword formats can also
contain shape information. The only difference is that if no shape is
given, a keyword can have multiple values (making it a vector).
It is possible to ignore comment lines in the header and data file
by giving the `commentmarker`. It indicates that lines
starting with the given marker are ignored. Note that the marker can
be a regular expression (e.g. `' *//'` tells that lines starting
with // and optionally preceeded by blanks have to be ignored).
With the arguments `firstline` and `lastline` one can
specify which lines have to be taken from the input file. A negative value
means 1 for `firstline` or end-of-file for `lastline`.
Note that if the headers and data are combined in one file,
these line arguments apply to the whole file. If headers and data are in
separate files, these line arguments apply to the data file only.
Also note that ignored comment lines are counted, thus are used to
determine which lines are in the line range.
The number of rows is determined by the number of lines read from the data
file.
"""
import os.path
filename = os.path.expandvars(asciifile)
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
s = "File '%s' not found" % (filename)
raise IOError(s)
if headerfile != '':
filename = os.path.expandvars(headerfile)
filename = os.path.expanduser(filename)
if not os.path.exists(filename):
s = "File '%s' not found" % (filename)
raise IOError(s)
tab = table(asciifile, headerfile, tablename, autoheader, autoshape,
sep, commentmarker, firstline, lastline,
_columnnames=columnnames, _datatypes=datatypes, _oper=1)
six.print_('Input format: [' + tab._getasciiformat() + ']')
# Close table and reopen it in correct way.
tab = 0
return table(tablename, readonly=readonly, lockoptions=lockoptions,
ack=ack) | [
"def",
"tablefromascii",
"(",
"tablename",
",",
"asciifile",
",",
"headerfile",
"=",
"''",
",",
"autoheader",
"=",
"False",
",",
"autoshape",
"=",
"[",
"]",
",",
"columnnames",
"=",
"[",
"]",
",",
"datatypes",
"=",
"[",
"]",
",",
"sep",
"=",
"' '",
"... | Create a table from an ASCII file.
Create a table from a file in ASCII format. Columnar data as well as
table and column keywords may be specified.
Once the table is created from the ASCII data, it is opened in the
specified mode and a table object is returned.
The table columns are filled from a file containing the data values
separated by a separator (one line per table row). The default
separator is a blank. Blanks before and after the separator are ignored.
If a non-blank separator is used, values can be empty. Such values
default to 0, empty string, or F depending on the data type. E.g.
1,,2, has 4 values of which the 2nd and 4th are empty and default to 0.
Similarly if fewer values are given than needed, the missing values
get the default value.
Either the data format can be explicitly specified or it can be found
automatically. The former gives more control in ambiguous situations.
Both scalar and array columns can be generated from the ASCII input.
The format string determines the type and optional shape.
It is possible to give the column names and their data types in
various ways:
- Using 2 header lines (as described below) as the first two lines
in the data file or in a separate header file. This is the default way.
- Derive them automatically from the data (`autoheader=True`).
- Using the arguments `columnnames` and
`datatypes` (as non-empty vectors of strings).
It implies (`autoheader=False`). The data types should be
given in the same way as done in headers.
In automatic mode (`autoheader=True`) the first line
of the ASCII data is analyzed
to deduce the data types. Only the types I, D, and A can be
recognized. A number without decimal point or exponent is I (integer),
otherwise it is D (double). Any other string is A (string).
Note that a number may contain a leading sign (+ or -).
The `autoshape` argument can be used to specify if the input
should be stored as multiple scalars (the default) or as a single
array. In the latter case one axis in the shape can be defined as
variable length by giving it the value 0. It means that the actual
array shape in a row is determined by the number of values in the
corresponding input line.
Columns get the names `Column1`, `Column2`, etc..
For example:
1. `autoshape=[]` (which is the default) means that all values
are to be stored as scalar columns.
2. `autoshape=0` means that all values in a row are to be stored as
a variable length vector.
3. `autoshape=10` defines a fixed length vector. If an input
line contains less than 10 values, the vector is filled with default
values. If more than 10 values, the latter values are ignored.
4. `autoshape=[5,0]` defines a 2-dim array of which the 2nd axis is
variable. Note that if an input line does not contain a multiple of 5
values, the array is filled with default values.
If the format of the table is explicitly specified, it has to be done
either in the first two lines of the data file (named by the
argument filename), or in a separate header file (named by the
argument headerfile). In both forms, table keywords may also be
specified before the column definitions.
The column names and types can be described by two lines:
1. The first line contains the names of the columns.
These names may be enclosed in quotes (either single or double).
2. The second line contains the data type and optionally the shape
of each column. Valid types are:
- S for Short data
- I for Integer data
- R for Real data
- D for Double Precision data
- X for Complex data (Real followed by Imaginary)
- Z for Complex data (Amplitude then Phase)
- DX for Double Precision Complex data (Real followed by Imaginary)
- DZ for Double Precision Complex data (Amplitude then Phase)
- A for ASCII data (a value must be enclosed in single or double quotes
if it contains whitespace)
- B for Boolean data (False are empty string, 0, or any string
starting with F, f, N, or n).
If a column is an array, the shape has to be given after the data type
without any whitespace. E.g. `I10` defines an integer vector
of length 10. `A2,5` defines a 2-dim string array with shape
[2,5]. Note that `I` is not the same as `I1` as the
first one defines a scalar and the other one a vector with length 1.
The last column can have one variable length axis denoted by the value 0.
It "consumes" the remainder of the input line.
If the argument headerfile is set then the header information is
read from that file instead of the first lines of the data file.
To give a simple example of the form where the header information
is located at the top of the data file::
COLI COLF COLD COLX COLZ COLS
I R D X Z A
1 1.1 1.11 1.12 1.13 1.14 1.15 Str1
10 11 12 13 14 15 16 ""
Note that a complex number consists of 2 numbers.
Also note that an empty string can be given.
Let us now give an example of a separate header file that one might use to
get interferometer data into casacore::
U V W TIME ANT1 ANT2 DATA
R R R D I I X1,0
The data file would then look like::
124.011 54560.0 3477.1 43456789.0990 1 2 4.327 -0.1132
34561.0 45629.3 3900.5 43456789.0990 1 3 5.398 0.4521
Note that the DATA column is defined as a 2-dim array of 1
correlation and a variable number of channels, so the actual number of
channels is determined by the input. In this example both rows will
have 1 channel (note that a complex value contains 2 values).
Tables may have keywords in addition to the columns. The keywords
are useful for holding information that is global to the entire
table (such as author, revision, history, etc.).
The keywords in the header definitions must preceed the column descriptions.
They must be enclosed between a line that starts with ".key..." and
a line that starts with ".endkey..." (where ... can be anything).
A table keywordset and column keywordsets can be specified.
The latter can be specified by specifying the column name after
the .keywords string.
Between these two lines each line should contain the following:
- The keyword name, e.g., ANYKEY
- The datatype and optional shape of the keyword
(cf. list of valid types above)
- The value or values for the keyword (the keyword may contain
a scalar or an array of values). e.g., 3.14159 21.78945
Thus to continue the example above, one might wish to add keywords
as follows::
.keywords
DATE A "97/1/16"
REVISION D 2.01
AUTHOR A "Tim Cornwell"
INSTRUMENT A "VLA"
.endkeywords
.keywords TIME
UNIT A "s"
.endkeywords
U V W TIME ANT1 ANT2 DATA
R R R D I I X1,0
Similarly to the column format string, the keyword formats can also
contain shape information. The only difference is that if no shape is
given, a keyword can have multiple values (making it a vector).
It is possible to ignore comment lines in the header and data file
by giving the `commentmarker`. It indicates that lines
starting with the given marker are ignored. Note that the marker can
be a regular expression (e.g. `' *//'` tells that lines starting
with // and optionally preceeded by blanks have to be ignored).
With the arguments `firstline` and `lastline` one can
specify which lines have to be taken from the input file. A negative value
means 1 for `firstline` or end-of-file for `lastline`.
Note that if the headers and data are combined in one file,
these line arguments apply to the whole file. If headers and data are in
separate files, these line arguments apply to the data file only.
Also note that ignored comment lines are counted, thus are used to
determine which lines are in the line range.
The number of rows is determined by the number of lines read from the data
file. | [
"Create",
"a",
"table",
"from",
"an",
"ASCII",
"file",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L35-L240 | train | 22,985 |
casacore/python-casacore | casacore/tables/tableutil.py | makescacoldesc | def makescacoldesc(columnname, value,
datamanagertype='',
datamanagergroup='',
options=0, maxlen=0, comment='',
valuetype='', keywords={}):
"""Create description of a scalar column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the data manager name, group, option, and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`columname`
Name of column
`value`
Example data value used to determine the column's data type.
It is only used if argument `valuetype` is not given.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default)
or IncrementalStMan. The latter one can save disk space if many subsequent
cells in the column will have the same value.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Options. Need not be filled in.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
scd1 = makescacoldesc("col2", ""))
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
td = maketabdesc([scd1, scd2])
This creates a table description consisting of an integer column `col1`,
and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
while `col2` uses the default storage manager StandardStMan.
"""
vtype = valuetype
if vtype == '':
vtype = _value_type_name(value)
rec2 = {'valueType': vtype,
'dataManagerType': datamanagertype,
'dataManagerGroup': datamanagergroup,
'option': options,
'maxlen': maxlen,
'comment': comment,
'keywords': keywords}
return {'name': columnname,
'desc': rec2} | python | def makescacoldesc(columnname, value,
datamanagertype='',
datamanagergroup='',
options=0, maxlen=0, comment='',
valuetype='', keywords={}):
"""Create description of a scalar column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the data manager name, group, option, and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`columname`
Name of column
`value`
Example data value used to determine the column's data type.
It is only used if argument `valuetype` is not given.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default)
or IncrementalStMan. The latter one can save disk space if many subsequent
cells in the column will have the same value.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Options. Need not be filled in.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
scd1 = makescacoldesc("col2", ""))
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
td = maketabdesc([scd1, scd2])
This creates a table description consisting of an integer column `col1`,
and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
while `col2` uses the default storage manager StandardStMan.
"""
vtype = valuetype
if vtype == '':
vtype = _value_type_name(value)
rec2 = {'valueType': vtype,
'dataManagerType': datamanagertype,
'dataManagerGroup': datamanagergroup,
'option': options,
'maxlen': maxlen,
'comment': comment,
'keywords': keywords}
return {'name': columnname,
'desc': rec2} | [
"def",
"makescacoldesc",
"(",
"columnname",
",",
"value",
",",
"datamanagertype",
"=",
"''",
",",
"datamanagergroup",
"=",
"''",
",",
"options",
"=",
"0",
",",
"maxlen",
"=",
"0",
",",
"comment",
"=",
"''",
",",
"valuetype",
"=",
"''",
",",
"keywords",
... | Create description of a scalar column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the data manager name, group, option, and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`columname`
Name of column
`value`
Example data value used to determine the column's data type.
It is only used if argument `valuetype` is not given.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default)
or IncrementalStMan. The latter one can save disk space if many subsequent
cells in the column will have the same value.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Options. Need not be filled in.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
scd1 = makescacoldesc("col2", ""))
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
td = maketabdesc([scd1, scd2])
This creates a table description consisting of an integer column `col1`,
and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
while `col2` uses the default storage manager StandardStMan. | [
"Create",
"description",
"of",
"a",
"scalar",
"column",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L244-L313 | train | 22,986 |
casacore/python-casacore | casacore/tables/tableutil.py | makearrcoldesc | def makearrcoldesc(columnname, value, ndim=0,
shape=[], datamanagertype='',
datamanagergroup='',
options=0, maxlen=0, comment='',
valuetype='', keywords={}):
"""Create description of an array column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the dimensionality, shape, data manager name, group, option,
and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`name`
The name of the column.
`value`
A data value, which is only used to determine the data type of the column.
It is only used if argument `valuetype` is not given.
`ndim`
Optionally the number of dimensions. A value > 0 means that all
arrays in the column must have that dimensionality. Note that the
arrays can still differ in shape unless the shape vector is also given.
`shape`
An optional sequence of integers giving the shape of the array in each
cell. If given, it forces option FixedShape (see below) and sets the
number of dimensions (if not given). All arrays in the column get the
given shape and the array is created as soon as a row is added.
Note that the shape vector gives the shape in each table cell; the
number of rows in the table should NOT be part of it.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default),
IncrementalStMan, TiledColumnStMan, TiledCellStMan, or TiledShapeStMan.
The tiled storage managers are usually used for bigger data arrays.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Optionally numeric array options which can be added to combine them.
`1` means Direct.
It tells that the data are directly stored in the table. Direct
forces option FixedShape. If not given, the array is indirect, which
means that the data will be stored in a separate file.
`4` means FixedShape.
This option does not need to be given, because it is enforced if
the shape is given. FixedShape means that the shape of the array must
be the same in each cell of the column. Otherwise the array shapes may
be different in each column cell and is it possible that a cell does
not contain an array at all.
Note that when given (or implicitly by option Direct), the
shape argument must be given as well.
Default is 0, thus indirect and variable shaped.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
acd1= makescacoldesc("arr1", 1., 0, [2,3,4])
td = maketabdesc(acd1)
This creates a table description consisting of an array column `arr1`
containing 3-dim arrays of doubles with shape [2,3,4].
"""
vtype = valuetype
if vtype == '':
vtype = _value_type_name(value)
if len(shape) > 0:
if ndim <= 0:
ndim = len(shape)
rec2 = {'valueType': vtype,
'dataManagerType': datamanagertype,
'dataManagerGroup': datamanagergroup,
'ndim': ndim,
'shape': shape,
'_c_order': True,
'option': options,
'maxlen': maxlen,
'comment': comment,
'keywords': keywords}
return {'name': columnname,
'desc': rec2} | python | def makearrcoldesc(columnname, value, ndim=0,
shape=[], datamanagertype='',
datamanagergroup='',
options=0, maxlen=0, comment='',
valuetype='', keywords={}):
"""Create description of an array column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the dimensionality, shape, data manager name, group, option,
and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`name`
The name of the column.
`value`
A data value, which is only used to determine the data type of the column.
It is only used if argument `valuetype` is not given.
`ndim`
Optionally the number of dimensions. A value > 0 means that all
arrays in the column must have that dimensionality. Note that the
arrays can still differ in shape unless the shape vector is also given.
`shape`
An optional sequence of integers giving the shape of the array in each
cell. If given, it forces option FixedShape (see below) and sets the
number of dimensions (if not given). All arrays in the column get the
given shape and the array is created as soon as a row is added.
Note that the shape vector gives the shape in each table cell; the
number of rows in the table should NOT be part of it.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default),
IncrementalStMan, TiledColumnStMan, TiledCellStMan, or TiledShapeStMan.
The tiled storage managers are usually used for bigger data arrays.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Optionally numeric array options which can be added to combine them.
`1` means Direct.
It tells that the data are directly stored in the table. Direct
forces option FixedShape. If not given, the array is indirect, which
means that the data will be stored in a separate file.
`4` means FixedShape.
This option does not need to be given, because it is enforced if
the shape is given. FixedShape means that the shape of the array must
be the same in each cell of the column. Otherwise the array shapes may
be different in each column cell and is it possible that a cell does
not contain an array at all.
Note that when given (or implicitly by option Direct), the
shape argument must be given as well.
Default is 0, thus indirect and variable shaped.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
acd1= makescacoldesc("arr1", 1., 0, [2,3,4])
td = maketabdesc(acd1)
This creates a table description consisting of an array column `arr1`
containing 3-dim arrays of doubles with shape [2,3,4].
"""
vtype = valuetype
if vtype == '':
vtype = _value_type_name(value)
if len(shape) > 0:
if ndim <= 0:
ndim = len(shape)
rec2 = {'valueType': vtype,
'dataManagerType': datamanagertype,
'dataManagerGroup': datamanagergroup,
'ndim': ndim,
'shape': shape,
'_c_order': True,
'option': options,
'maxlen': maxlen,
'comment': comment,
'keywords': keywords}
return {'name': columnname,
'desc': rec2} | [
"def",
"makearrcoldesc",
"(",
"columnname",
",",
"value",
",",
"ndim",
"=",
"0",
",",
"shape",
"=",
"[",
"]",
",",
"datamanagertype",
"=",
"''",
",",
"datamanagergroup",
"=",
"''",
",",
"options",
"=",
"0",
",",
"maxlen",
"=",
"0",
",",
"comment",
"=... | Create description of an array column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the dimensionality, shape, data manager name, group, option,
and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`name`
The name of the column.
`value`
A data value, which is only used to determine the data type of the column.
It is only used if argument `valuetype` is not given.
`ndim`
Optionally the number of dimensions. A value > 0 means that all
arrays in the column must have that dimensionality. Note that the
arrays can still differ in shape unless the shape vector is also given.
`shape`
An optional sequence of integers giving the shape of the array in each
cell. If given, it forces option FixedShape (see below) and sets the
number of dimensions (if not given). All arrays in the column get the
given shape and the array is created as soon as a row is added.
Note that the shape vector gives the shape in each table cell; the
number of rows in the table should NOT be part of it.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default),
IncrementalStMan, TiledColumnStMan, TiledCellStMan, or TiledShapeStMan.
The tiled storage managers are usually used for bigger data arrays.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Optionally numeric array options which can be added to combine them.
`1` means Direct.
It tells that the data are directly stored in the table. Direct
forces option FixedShape. If not given, the array is indirect, which
means that the data will be stored in a separate file.
`4` means FixedShape.
This option does not need to be given, because it is enforced if
the shape is given. FixedShape means that the shape of the array must
be the same in each cell of the column. Otherwise the array shapes may
be different in each column cell and is it possible that a cell does
not contain an array at all.
Note that when given (or implicitly by option Direct), the
shape argument must be given as well.
Default is 0, thus indirect and variable shaped.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
acd1= makescacoldesc("arr1", 1., 0, [2,3,4])
td = maketabdesc(acd1)
This creates a table description consisting of an array column `arr1`
containing 3-dim arrays of doubles with shape [2,3,4]. | [
"Create",
"description",
"of",
"an",
"array",
"column",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L317-L417 | train | 22,987 |
casacore/python-casacore | casacore/tables/tableutil.py | maketabdesc | def maketabdesc(descs=[]):
"""Create a table description.
Creates a table description from a set of column descriptions. The
resulting table description can be used in the :class:`table` constructor.
For example::
scd1 = makescacoldesc("col2", "aa")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
scd3 = makescacoldesc("colrec1", {})
acd1 = makearrcoldesc("arr1", 1, 0, [2,3,4])
acd2 = makearrcoldesc("arr2", 0.+0j)
td = maketabdesc([scd1, scd2, scd3, acd1, acd2])
t = table("mytable", td, nrow=100)
| This creates a table description `td` from five column descriptions
and then creates a 100-row table called `mytable` from the table
description.
| The columns contain respectivily strings, integer scalars, records,
3D integer arrays with fixed shape [2,3,4], and complex arrays with
variable shape.
"""
rec = {}
# If a single dict is given, make a list of it.
if isinstance(descs, dict):
descs = [descs]
for desc in descs:
colname = desc['name']
if colname in rec:
raise ValueError('Column name ' + colname + ' multiply used in table description')
rec[colname] = desc['desc']
return rec | python | def maketabdesc(descs=[]):
"""Create a table description.
Creates a table description from a set of column descriptions. The
resulting table description can be used in the :class:`table` constructor.
For example::
scd1 = makescacoldesc("col2", "aa")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
scd3 = makescacoldesc("colrec1", {})
acd1 = makearrcoldesc("arr1", 1, 0, [2,3,4])
acd2 = makearrcoldesc("arr2", 0.+0j)
td = maketabdesc([scd1, scd2, scd3, acd1, acd2])
t = table("mytable", td, nrow=100)
| This creates a table description `td` from five column descriptions
and then creates a 100-row table called `mytable` from the table
description.
| The columns contain respectivily strings, integer scalars, records,
3D integer arrays with fixed shape [2,3,4], and complex arrays with
variable shape.
"""
rec = {}
# If a single dict is given, make a list of it.
if isinstance(descs, dict):
descs = [descs]
for desc in descs:
colname = desc['name']
if colname in rec:
raise ValueError('Column name ' + colname + ' multiply used in table description')
rec[colname] = desc['desc']
return rec | [
"def",
"maketabdesc",
"(",
"descs",
"=",
"[",
"]",
")",
":",
"rec",
"=",
"{",
"}",
"# If a single dict is given, make a list of it.",
"if",
"isinstance",
"(",
"descs",
",",
"dict",
")",
":",
"descs",
"=",
"[",
"descs",
"]",
"for",
"desc",
"in",
"descs",
... | Create a table description.
Creates a table description from a set of column descriptions. The
resulting table description can be used in the :class:`table` constructor.
For example::
scd1 = makescacoldesc("col2", "aa")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
scd3 = makescacoldesc("colrec1", {})
acd1 = makearrcoldesc("arr1", 1, 0, [2,3,4])
acd2 = makearrcoldesc("arr2", 0.+0j)
td = maketabdesc([scd1, scd2, scd3, acd1, acd2])
t = table("mytable", td, nrow=100)
| This creates a table description `td` from five column descriptions
and then creates a 100-row table called `mytable` from the table
description.
| The columns contain respectivily strings, integer scalars, records,
3D integer arrays with fixed shape [2,3,4], and complex arrays with
variable shape. | [
"Create",
"a",
"table",
"description",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L450-L483 | train | 22,988 |
casacore/python-casacore | casacore/tables/tableutil.py | makedminfo | def makedminfo(tabdesc, group_spec=None):
"""Creates a data manager information object.
Create a data manager information dictionary outline from a table description.
The resulting dictionary is a bare outline and is available for the purposes of
further customising the data manager via the `group_spec` argument.
The resulting dictionary can be used in the :class:`table` constructor and
the :meth:`default_ms` and :meth:`default_ms_subtable` functions.
`tabdesc`
The table description
`group_spec`
The SPEC for a data manager group. In practice this is useful for
setting the Default Tile Size and Maximum Cache Size for the Data Manager
{
'WeightColumnGroup' : {
'DEFAULTTILESHAPE': np.int32([4,4,4]),
'MAXIMUMCACHESIZE': 1000,
}
}
This should be used with care.
"""
if group_spec is None:
group_spec = {}
class DMGroup(object):
"""
Keep track of the columns, type and spec of each data manager group
"""
def __init__(self):
self.columns = []
self.type = None
self.spec = None
dm_groups = defaultdict(DMGroup)
# Iterate through the table columns, grouping them
# by their dataManagerGroup
for c, d in six.iteritems(tabdesc):
if c in ('_define_hypercolumn_', '_keywords_', '_private_keywords_'):
continue
# Extract group and data manager type
group = d.get("dataManagerGroup", "StandardStMan")
type_ = d.get("dataManagerType", "StandardStMan")
# Set defaults if necessary
if not group:
group = "StandardStMan"
if not type_:
type_ = "StandardStMan"
# Obtain the (possibly empty) data manager group
dm_group = dm_groups[group]
# Add the column
dm_group.columns.append(c)
# Set the spec
if dm_group.spec is None:
dm_group.spec = group_spec.get(group, {})
# Check that the data manager type is consistent across columns
if dm_group.type is None:
dm_group.type = type_
elif not dm_group.type == type_:
raise ValueError("Mismatched dataManagerType '%s' "
"for dataManagerGroup '%s' "
"Previously, the type was '%s'" %
(type_, group, dm_group.type))
# Output a data manager entry
return {
'*%d'%(i+1): {
'COLUMNS': dm_group.columns,
'TYPE': dm_group.type,
'NAME': group,
'SPEC' : dm_group.spec,
'SEQNR': i
} for i, (group, dm_group)
in enumerate(six.iteritems(dm_groups))
} | python | def makedminfo(tabdesc, group_spec=None):
"""Creates a data manager information object.
Create a data manager information dictionary outline from a table description.
The resulting dictionary is a bare outline and is available for the purposes of
further customising the data manager via the `group_spec` argument.
The resulting dictionary can be used in the :class:`table` constructor and
the :meth:`default_ms` and :meth:`default_ms_subtable` functions.
`tabdesc`
The table description
`group_spec`
The SPEC for a data manager group. In practice this is useful for
setting the Default Tile Size and Maximum Cache Size for the Data Manager
{
'WeightColumnGroup' : {
'DEFAULTTILESHAPE': np.int32([4,4,4]),
'MAXIMUMCACHESIZE': 1000,
}
}
This should be used with care.
"""
if group_spec is None:
group_spec = {}
class DMGroup(object):
"""
Keep track of the columns, type and spec of each data manager group
"""
def __init__(self):
self.columns = []
self.type = None
self.spec = None
dm_groups = defaultdict(DMGroup)
# Iterate through the table columns, grouping them
# by their dataManagerGroup
for c, d in six.iteritems(tabdesc):
if c in ('_define_hypercolumn_', '_keywords_', '_private_keywords_'):
continue
# Extract group and data manager type
group = d.get("dataManagerGroup", "StandardStMan")
type_ = d.get("dataManagerType", "StandardStMan")
# Set defaults if necessary
if not group:
group = "StandardStMan"
if not type_:
type_ = "StandardStMan"
# Obtain the (possibly empty) data manager group
dm_group = dm_groups[group]
# Add the column
dm_group.columns.append(c)
# Set the spec
if dm_group.spec is None:
dm_group.spec = group_spec.get(group, {})
# Check that the data manager type is consistent across columns
if dm_group.type is None:
dm_group.type = type_
elif not dm_group.type == type_:
raise ValueError("Mismatched dataManagerType '%s' "
"for dataManagerGroup '%s' "
"Previously, the type was '%s'" %
(type_, group, dm_group.type))
# Output a data manager entry
return {
'*%d'%(i+1): {
'COLUMNS': dm_group.columns,
'TYPE': dm_group.type,
'NAME': group,
'SPEC' : dm_group.spec,
'SEQNR': i
} for i, (group, dm_group)
in enumerate(six.iteritems(dm_groups))
} | [
"def",
"makedminfo",
"(",
"tabdesc",
",",
"group_spec",
"=",
"None",
")",
":",
"if",
"group_spec",
"is",
"None",
":",
"group_spec",
"=",
"{",
"}",
"class",
"DMGroup",
"(",
"object",
")",
":",
"\"\"\"\n Keep track of the columns, type and spec of each data manager... | Creates a data manager information object.
Create a data manager information dictionary outline from a table description.
The resulting dictionary is a bare outline and is available for the purposes of
further customising the data manager via the `group_spec` argument.
The resulting dictionary can be used in the :class:`table` constructor and
the :meth:`default_ms` and :meth:`default_ms_subtable` functions.
`tabdesc`
The table description
`group_spec`
The SPEC for a data manager group. In practice this is useful for
setting the Default Tile Size and Maximum Cache Size for the Data Manager
{
'WeightColumnGroup' : {
'DEFAULTTILESHAPE': np.int32([4,4,4]),
'MAXIMUMCACHESIZE': 1000,
}
}
This should be used with care. | [
"Creates",
"a",
"data",
"manager",
"information",
"object",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L485-L569 | train | 22,989 |
casacore/python-casacore | casacore/tables/tableutil.py | tabledefinehypercolumn | def tabledefinehypercolumn(tabdesc,
name, ndim, datacolumns,
coordcolumns=False,
idcolumns=False):
"""Add a hypercolumn to a table description.
It defines a hypercolumn and adds it the given table description.
A hypercolumn is an entity used by the Tiled Storage Managers (TSM). It
defines which columns have to be stored together with a TSM.
It should only be used by expert users who want to use a TSM to its
full extent. For a basic TSM s hypercolumn definition is not needed.
tabledesc
A table description (result from :func:`maketabdesc`).
name
Name of hypercolumn
ndim
Dimensionality of hypercolumn; normally 1 more than the dimensionality
of the arrays in the data columns to be stored with the TSM
datacolumns
Data columns to be stored with TSM
coordcolumns
Optional coordinate columns to be stored with TSM
idcolumns
Optional id columns to be stored with TSM
For example::
scd1 = makescacoldesc("col2", "aa")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
scd3 = makescacoldesc("colrec1", {})
acd1 = makearrcoldesc("arr1", 1, 0, [2,3,4])
acd2 = makearrcoldesc("arr2", as_complex(0))
td = maketabdesc([scd1, scd2, scd3, acd1, acd2])
tabledefinehypercolumn(td, "TiledArray", 4, ["arr1"])
tab = table("mytable", tabledesc=td, nrow=100)
| This creates a table description `td` from five column descriptions
and then creates a 100-row table called mytable from the table
description.
| The columns contain respectivily strings, integer scalars, records,
3D integer arrays with fixed shape [2,3,4], and complex arrays with
variable shape.
| The first array is stored with the Tiled Storage Manager (in this case
the TiledColumnStMan).
"""
rec = {'HCndim': ndim,
'HCdatanames': datacolumns}
if not isinstance(coordcolumns, bool):
rec['HCcoordnames'] = coordcolumns
if not isinstance(idcolumns, bool):
rec['HCidnames'] = idcolumns
if '_define_hypercolumn_' not in tabdesc:
tabdesc['_define_hypercolumn_'] = {}
tabdesc['_define_hypercolumn_'][name] = rec | python | def tabledefinehypercolumn(tabdesc,
name, ndim, datacolumns,
coordcolumns=False,
idcolumns=False):
"""Add a hypercolumn to a table description.
It defines a hypercolumn and adds it the given table description.
A hypercolumn is an entity used by the Tiled Storage Managers (TSM). It
defines which columns have to be stored together with a TSM.
It should only be used by expert users who want to use a TSM to its
full extent. For a basic TSM s hypercolumn definition is not needed.
tabledesc
A table description (result from :func:`maketabdesc`).
name
Name of hypercolumn
ndim
Dimensionality of hypercolumn; normally 1 more than the dimensionality
of the arrays in the data columns to be stored with the TSM
datacolumns
Data columns to be stored with TSM
coordcolumns
Optional coordinate columns to be stored with TSM
idcolumns
Optional id columns to be stored with TSM
For example::
scd1 = makescacoldesc("col2", "aa")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
scd3 = makescacoldesc("colrec1", {})
acd1 = makearrcoldesc("arr1", 1, 0, [2,3,4])
acd2 = makearrcoldesc("arr2", as_complex(0))
td = maketabdesc([scd1, scd2, scd3, acd1, acd2])
tabledefinehypercolumn(td, "TiledArray", 4, ["arr1"])
tab = table("mytable", tabledesc=td, nrow=100)
| This creates a table description `td` from five column descriptions
and then creates a 100-row table called mytable from the table
description.
| The columns contain respectivily strings, integer scalars, records,
3D integer arrays with fixed shape [2,3,4], and complex arrays with
variable shape.
| The first array is stored with the Tiled Storage Manager (in this case
the TiledColumnStMan).
"""
rec = {'HCndim': ndim,
'HCdatanames': datacolumns}
if not isinstance(coordcolumns, bool):
rec['HCcoordnames'] = coordcolumns
if not isinstance(idcolumns, bool):
rec['HCidnames'] = idcolumns
if '_define_hypercolumn_' not in tabdesc:
tabdesc['_define_hypercolumn_'] = {}
tabdesc['_define_hypercolumn_'][name] = rec | [
"def",
"tabledefinehypercolumn",
"(",
"tabdesc",
",",
"name",
",",
"ndim",
",",
"datacolumns",
",",
"coordcolumns",
"=",
"False",
",",
"idcolumns",
"=",
"False",
")",
":",
"rec",
"=",
"{",
"'HCndim'",
":",
"ndim",
",",
"'HCdatanames'",
":",
"datacolumns",
... | Add a hypercolumn to a table description.
It defines a hypercolumn and adds it the given table description.
A hypercolumn is an entity used by the Tiled Storage Managers (TSM). It
defines which columns have to be stored together with a TSM.
It should only be used by expert users who want to use a TSM to its
full extent. For a basic TSM s hypercolumn definition is not needed.
tabledesc
A table description (result from :func:`maketabdesc`).
name
Name of hypercolumn
ndim
Dimensionality of hypercolumn; normally 1 more than the dimensionality
of the arrays in the data columns to be stored with the TSM
datacolumns
Data columns to be stored with TSM
coordcolumns
Optional coordinate columns to be stored with TSM
idcolumns
Optional id columns to be stored with TSM
For example::
scd1 = makescacoldesc("col2", "aa")
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
scd3 = makescacoldesc("colrec1", {})
acd1 = makearrcoldesc("arr1", 1, 0, [2,3,4])
acd2 = makearrcoldesc("arr2", as_complex(0))
td = maketabdesc([scd1, scd2, scd3, acd1, acd2])
tabledefinehypercolumn(td, "TiledArray", 4, ["arr1"])
tab = table("mytable", tabledesc=td, nrow=100)
| This creates a table description `td` from five column descriptions
and then creates a 100-row table called mytable from the table
description.
| The columns contain respectivily strings, integer scalars, records,
3D integer arrays with fixed shape [2,3,4], and complex arrays with
variable shape.
| The first array is stored with the Tiled Storage Manager (in this case
the TiledColumnStMan). | [
"Add",
"a",
"hypercolumn",
"to",
"a",
"table",
"description",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L579-L635 | train | 22,990 |
casacore/python-casacore | casacore/tables/tableutil.py | tabledelete | def tabledelete(tablename, checksubtables=False, ack=True):
"""Delete a table on disk.
It is the same as :func:`table.delete`, but without the need to open
the table first.
"""
tabname = _remove_prefix(tablename)
t = table(tabname, ack=False)
if t.ismultiused(checksubtables):
six.print_('Table', tabname, 'cannot be deleted; it is still in use')
else:
t = 0
table(tabname, readonly=False, _delete=True, ack=False)
if ack:
six.print_('Table', tabname, 'has been deleted') | python | def tabledelete(tablename, checksubtables=False, ack=True):
"""Delete a table on disk.
It is the same as :func:`table.delete`, but without the need to open
the table first.
"""
tabname = _remove_prefix(tablename)
t = table(tabname, ack=False)
if t.ismultiused(checksubtables):
six.print_('Table', tabname, 'cannot be deleted; it is still in use')
else:
t = 0
table(tabname, readonly=False, _delete=True, ack=False)
if ack:
six.print_('Table', tabname, 'has been deleted') | [
"def",
"tabledelete",
"(",
"tablename",
",",
"checksubtables",
"=",
"False",
",",
"ack",
"=",
"True",
")",
":",
"tabname",
"=",
"_remove_prefix",
"(",
"tablename",
")",
"t",
"=",
"table",
"(",
"tabname",
",",
"ack",
"=",
"False",
")",
"if",
"t",
".",
... | Delete a table on disk.
It is the same as :func:`table.delete`, but without the need to open
the table first. | [
"Delete",
"a",
"table",
"on",
"disk",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L638-L653 | train | 22,991 |
casacore/python-casacore | casacore/tables/tableutil.py | tableexists | def tableexists(tablename):
"""Test if a table exists."""
result = True
try:
t = table(tablename, ack=False)
except:
result = False
return result | python | def tableexists(tablename):
"""Test if a table exists."""
result = True
try:
t = table(tablename, ack=False)
except:
result = False
return result | [
"def",
"tableexists",
"(",
"tablename",
")",
":",
"result",
"=",
"True",
"try",
":",
"t",
"=",
"table",
"(",
"tablename",
",",
"ack",
"=",
"False",
")",
"except",
":",
"result",
"=",
"False",
"return",
"result"
] | Test if a table exists. | [
"Test",
"if",
"a",
"table",
"exists",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L656-L663 | train | 22,992 |
casacore/python-casacore | casacore/tables/tableutil.py | tableiswritable | def tableiswritable(tablename):
"""Test if a table is writable."""
result = True
try:
t = table(tablename, readonly=False, ack=False)
result = t.iswritable()
except:
result = False
return result | python | def tableiswritable(tablename):
"""Test if a table is writable."""
result = True
try:
t = table(tablename, readonly=False, ack=False)
result = t.iswritable()
except:
result = False
return result | [
"def",
"tableiswritable",
"(",
"tablename",
")",
":",
"result",
"=",
"True",
"try",
":",
"t",
"=",
"table",
"(",
"tablename",
",",
"readonly",
"=",
"False",
",",
"ack",
"=",
"False",
")",
"result",
"=",
"t",
".",
"iswritable",
"(",
")",
"except",
":"... | Test if a table is writable. | [
"Test",
"if",
"a",
"table",
"is",
"writable",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L666-L674 | train | 22,993 |
casacore/python-casacore | casacore/tables/tableutil.py | tablestructure | def tablestructure(tablename, dataman=True, column=True, subtable=False,
sort=False):
"""Print the structure of a table.
It is the same as :func:`table.showstructure`, but without the need to open
the table first.
"""
t = table(tablename, ack=False)
six.print_(t.showstructure(dataman, column, subtable, sort)) | python | def tablestructure(tablename, dataman=True, column=True, subtable=False,
sort=False):
"""Print the structure of a table.
It is the same as :func:`table.showstructure`, but without the need to open
the table first.
"""
t = table(tablename, ack=False)
six.print_(t.showstructure(dataman, column, subtable, sort)) | [
"def",
"tablestructure",
"(",
"tablename",
",",
"dataman",
"=",
"True",
",",
"column",
"=",
"True",
",",
"subtable",
"=",
"False",
",",
"sort",
"=",
"False",
")",
":",
"t",
"=",
"table",
"(",
"tablename",
",",
"ack",
"=",
"False",
")",
"six",
".",
... | Print the structure of a table.
It is the same as :func:`table.showstructure`, but without the need to open
the table first. | [
"Print",
"the",
"structure",
"of",
"a",
"table",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L723-L732 | train | 22,994 |
casacore/python-casacore | casacore/images/image.py | image.attrget | def attrget(self, groupname, attrname, rownr):
"""Get the value of an attribute in the given row in a group."""
return self._attrget(groupname, attrname, rownr) | python | def attrget(self, groupname, attrname, rownr):
"""Get the value of an attribute in the given row in a group."""
return self._attrget(groupname, attrname, rownr) | [
"def",
"attrget",
"(",
"self",
",",
"groupname",
",",
"attrname",
",",
"rownr",
")",
":",
"return",
"self",
".",
"_attrget",
"(",
"groupname",
",",
"attrname",
",",
"rownr",
")"
] | Get the value of an attribute in the given row in a group. | [
"Get",
"the",
"value",
"of",
"an",
"attribute",
"in",
"the",
"given",
"row",
"in",
"a",
"group",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L237-L239 | train | 22,995 |
casacore/python-casacore | casacore/images/image.py | image.attrgetcol | def attrgetcol(self, groupname, attrname):
"""Get the value of an attribute for all rows in a group."""
values = []
for rownr in range(self.attrnrows(groupname)):
values.append(self.attrget(groupname, attrname, rownr))
return values | python | def attrgetcol(self, groupname, attrname):
"""Get the value of an attribute for all rows in a group."""
values = []
for rownr in range(self.attrnrows(groupname)):
values.append(self.attrget(groupname, attrname, rownr))
return values | [
"def",
"attrgetcol",
"(",
"self",
",",
"groupname",
",",
"attrname",
")",
":",
"values",
"=",
"[",
"]",
"for",
"rownr",
"in",
"range",
"(",
"self",
".",
"attrnrows",
"(",
"groupname",
")",
")",
":",
"values",
".",
"append",
"(",
"self",
".",
"attrget... | Get the value of an attribute for all rows in a group. | [
"Get",
"the",
"value",
"of",
"an",
"attribute",
"for",
"all",
"rows",
"in",
"a",
"group",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L241-L246 | train | 22,996 |
casacore/python-casacore | casacore/images/image.py | image.attrfindrows | def attrfindrows(self, groupname, attrname, value):
"""Get the row numbers of all rows where the attribute matches the given value."""
values = self.attrgetcol(groupname, attrname)
return [i for i in range(len(values)) if values[i] == value] | python | def attrfindrows(self, groupname, attrname, value):
"""Get the row numbers of all rows where the attribute matches the given value."""
values = self.attrgetcol(groupname, attrname)
return [i for i in range(len(values)) if values[i] == value] | [
"def",
"attrfindrows",
"(",
"self",
",",
"groupname",
",",
"attrname",
",",
"value",
")",
":",
"values",
"=",
"self",
".",
"attrgetcol",
"(",
"groupname",
",",
"attrname",
")",
"return",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"values",
... | Get the row numbers of all rows where the attribute matches the given value. | [
"Get",
"the",
"row",
"numbers",
"of",
"all",
"rows",
"where",
"the",
"attribute",
"matches",
"the",
"given",
"value",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L248-L251 | train | 22,997 |
casacore/python-casacore | casacore/images/image.py | image.attrgetrow | def attrgetrow(self, groupname, key, value=None):
"""Get the values of all attributes of a row in a group.
If the key is an integer, the key is the row number for which
the attribute values have to be returned.
Otherwise the key has to be a string and it defines the name of an
attribute. The attribute values of the row for which the key matches
the given value is returned.
It can only be used for unique attribute keys. An IndexError exception
is raised if no or multiple matches are found.
"""
if not isinstance(key, str):
return self._attrgetrow(groupname, key)
# The key is an attribute name whose value has to be found.
rownrs = self.attrfindrows(groupname, key, value)
if len(rownrs) == 0:
raise IndexError("Image attribute " + key + " in group " +
groupname + " has no matches for value " +
str(value))
if len(rownrs) > 1:
raise IndexError("Image attribute " + key + " in group " +
groupname + " has multiple matches for value " +
str(value))
return self._attrgetrow(groupname, rownrs[0]) | python | def attrgetrow(self, groupname, key, value=None):
"""Get the values of all attributes of a row in a group.
If the key is an integer, the key is the row number for which
the attribute values have to be returned.
Otherwise the key has to be a string and it defines the name of an
attribute. The attribute values of the row for which the key matches
the given value is returned.
It can only be used for unique attribute keys. An IndexError exception
is raised if no or multiple matches are found.
"""
if not isinstance(key, str):
return self._attrgetrow(groupname, key)
# The key is an attribute name whose value has to be found.
rownrs = self.attrfindrows(groupname, key, value)
if len(rownrs) == 0:
raise IndexError("Image attribute " + key + " in group " +
groupname + " has no matches for value " +
str(value))
if len(rownrs) > 1:
raise IndexError("Image attribute " + key + " in group " +
groupname + " has multiple matches for value " +
str(value))
return self._attrgetrow(groupname, rownrs[0]) | [
"def",
"attrgetrow",
"(",
"self",
",",
"groupname",
",",
"key",
",",
"value",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"key",
",",
"str",
")",
":",
"return",
"self",
".",
"_attrgetrow",
"(",
"groupname",
",",
"key",
")",
"# The key is an ... | Get the values of all attributes of a row in a group.
If the key is an integer, the key is the row number for which
the attribute values have to be returned.
Otherwise the key has to be a string and it defines the name of an
attribute. The attribute values of the row for which the key matches
the given value is returned.
It can only be used for unique attribute keys. An IndexError exception
is raised if no or multiple matches are found. | [
"Get",
"the",
"values",
"of",
"all",
"attributes",
"of",
"a",
"row",
"in",
"a",
"group",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L253-L277 | train | 22,998 |
casacore/python-casacore | casacore/images/image.py | image.attrput | def attrput(self, groupname, attrname, rownr, value, unit=[], meas=[]):
"""Put the value and optionally unit and measinfo
of an attribute in a row in a group."""
return self._attrput(groupname, attrname, rownr, value, unit, meas) | python | def attrput(self, groupname, attrname, rownr, value, unit=[], meas=[]):
"""Put the value and optionally unit and measinfo
of an attribute in a row in a group."""
return self._attrput(groupname, attrname, rownr, value, unit, meas) | [
"def",
"attrput",
"(",
"self",
",",
"groupname",
",",
"attrname",
",",
"rownr",
",",
"value",
",",
"unit",
"=",
"[",
"]",
",",
"meas",
"=",
"[",
"]",
")",
":",
"return",
"self",
".",
"_attrput",
"(",
"groupname",
",",
"attrname",
",",
"rownr",
",",... | Put the value and optionally unit and measinfo
of an attribute in a row in a group. | [
"Put",
"the",
"value",
"and",
"optionally",
"unit",
"and",
"measinfo",
"of",
"an",
"attribute",
"in",
"a",
"row",
"in",
"a",
"group",
"."
] | 975510861ea005f7919dd9e438b5f98a1682eebe | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L287-L290 | train | 22,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.