id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
226,800 | tino/pyFirmata | pyfirmata/util.py | from_two_bytes | def from_two_bytes(bytes):
"""
Return an integer from two 7 bit bytes.
"""
lsb, msb = bytes
try:
# Usually bytes have been converted to integers with ord already
return msb << 7 | lsb
except TypeError:
# But add this for easy testing
# One of them can be a string, or both
try:
lsb = ord(lsb)
except TypeError:
pass
try:
msb = ord(msb)
except TypeError:
pass
return msb << 7 | lsb | python | def from_two_bytes(bytes):
lsb, msb = bytes
try:
# Usually bytes have been converted to integers with ord already
return msb << 7 | lsb
except TypeError:
# But add this for easy testing
# One of them can be a string, or both
try:
lsb = ord(lsb)
except TypeError:
pass
try:
msb = ord(msb)
except TypeError:
pass
return msb << 7 | lsb | [
"def",
"from_two_bytes",
"(",
"bytes",
")",
":",
"lsb",
",",
"msb",
"=",
"bytes",
"try",
":",
"# Usually bytes have been converted to integers with ord already",
"return",
"msb",
"<<",
"7",
"|",
"lsb",
"except",
"TypeError",
":",
"# But add this for easy testing",
"# ... | Return an integer from two 7 bit bytes. | [
"Return",
"an",
"integer",
"from",
"two",
"7",
"bit",
"bytes",
"."
] | 05881909c4d7c4e808e9ed457144670b2136706e | https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/util.py#L86-L105 |
226,801 | tino/pyFirmata | pyfirmata/util.py | two_byte_iter_to_str | def two_byte_iter_to_str(bytes):
"""
Return a string made from a list of two byte chars.
"""
bytes = list(bytes)
chars = bytearray()
while bytes:
lsb = bytes.pop(0)
try:
msb = bytes.pop(0)
except IndexError:
msb = 0x00
chars.append(from_two_bytes([lsb, msb]))
return chars.decode() | python | def two_byte_iter_to_str(bytes):
bytes = list(bytes)
chars = bytearray()
while bytes:
lsb = bytes.pop(0)
try:
msb = bytes.pop(0)
except IndexError:
msb = 0x00
chars.append(from_two_bytes([lsb, msb]))
return chars.decode() | [
"def",
"two_byte_iter_to_str",
"(",
"bytes",
")",
":",
"bytes",
"=",
"list",
"(",
"bytes",
")",
"chars",
"=",
"bytearray",
"(",
")",
"while",
"bytes",
":",
"lsb",
"=",
"bytes",
".",
"pop",
"(",
"0",
")",
"try",
":",
"msb",
"=",
"bytes",
".",
"pop",... | Return a string made from a list of two byte chars. | [
"Return",
"a",
"string",
"made",
"from",
"a",
"list",
"of",
"two",
"byte",
"chars",
"."
] | 05881909c4d7c4e808e9ed457144670b2136706e | https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/util.py#L108-L121 |
226,802 | tino/pyFirmata | pyfirmata/util.py | str_to_two_byte_iter | def str_to_two_byte_iter(string):
"""
Return a iter consisting of two byte chars from a string.
"""
bstring = string.encode()
bytes = bytearray()
for char in bstring:
bytes.append(char)
bytes.append(0)
return bytes | python | def str_to_two_byte_iter(string):
bstring = string.encode()
bytes = bytearray()
for char in bstring:
bytes.append(char)
bytes.append(0)
return bytes | [
"def",
"str_to_two_byte_iter",
"(",
"string",
")",
":",
"bstring",
"=",
"string",
".",
"encode",
"(",
")",
"bytes",
"=",
"bytearray",
"(",
")",
"for",
"char",
"in",
"bstring",
":",
"bytes",
".",
"append",
"(",
"char",
")",
"bytes",
".",
"append",
"(",
... | Return a iter consisting of two byte chars from a string. | [
"Return",
"a",
"iter",
"consisting",
"of",
"two",
"byte",
"chars",
"from",
"a",
"string",
"."
] | 05881909c4d7c4e808e9ed457144670b2136706e | https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/util.py#L124-L133 |
226,803 | tino/pyFirmata | pyfirmata/mockup.py | MockupSerial.write | def write(self, value):
"""
Appends bytes flat to the deque. So iterables will be unpacked.
"""
if hasattr(value, '__iter__'):
bytearray(value)
self.extend(value)
else:
bytearray([value])
self.append(value) | python | def write(self, value):
if hasattr(value, '__iter__'):
bytearray(value)
self.extend(value)
else:
bytearray([value])
self.append(value) | [
"def",
"write",
"(",
"self",
",",
"value",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"'__iter__'",
")",
":",
"bytearray",
"(",
"value",
")",
"self",
".",
"extend",
"(",
"value",
")",
"else",
":",
"bytearray",
"(",
"[",
"value",
"]",
")",
"self"... | Appends bytes flat to the deque. So iterables will be unpacked. | [
"Appends",
"bytes",
"flat",
"to",
"the",
"deque",
".",
"So",
"iterables",
"will",
"be",
"unpacked",
"."
] | 05881909c4d7c4e808e9ed457144670b2136706e | https://github.com/tino/pyFirmata/blob/05881909c4d7c4e808e9ed457144670b2136706e/pyfirmata/mockup.py#L32-L41 |
226,804 | python-escpos/python-escpos | src/escpos/capabilities.py | get_profile | def get_profile(name=None, **kwargs):
"""Get the profile by name; if no name is given, return the
default profile.
"""
if isinstance(name, Profile):
return name
clazz = get_profile_class(name or 'default')
return clazz(**kwargs) | python | def get_profile(name=None, **kwargs):
if isinstance(name, Profile):
return name
clazz = get_profile_class(name or 'default')
return clazz(**kwargs) | [
"def",
"get_profile",
"(",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"Profile",
")",
":",
"return",
"name",
"clazz",
"=",
"get_profile_class",
"(",
"name",
"or",
"'default'",
")",
"return",
"clazz",
"... | Get the profile by name; if no name is given, return the
default profile. | [
"Get",
"the",
"profile",
"by",
"name",
";",
"if",
"no",
"name",
"is",
"given",
"return",
"the",
"default",
"profile",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/capabilities.py#L99-L107 |
226,805 | python-escpos/python-escpos | src/escpos/capabilities.py | get_profile_class | def get_profile_class(name):
"""For the given profile name, load the data from the external
database, then generate dynamically a class.
"""
if name not in CLASS_CACHE:
profile_data = PROFILES[name]
profile_name = clean(name)
class_name = '{}{}Profile'.format(
profile_name[0].upper(), profile_name[1:])
new_class = type(class_name, (BaseProfile,), {'profile_data': profile_data})
CLASS_CACHE[name] = new_class
return CLASS_CACHE[name] | python | def get_profile_class(name):
if name not in CLASS_CACHE:
profile_data = PROFILES[name]
profile_name = clean(name)
class_name = '{}{}Profile'.format(
profile_name[0].upper(), profile_name[1:])
new_class = type(class_name, (BaseProfile,), {'profile_data': profile_data})
CLASS_CACHE[name] = new_class
return CLASS_CACHE[name] | [
"def",
"get_profile_class",
"(",
"name",
")",
":",
"if",
"name",
"not",
"in",
"CLASS_CACHE",
":",
"profile_data",
"=",
"PROFILES",
"[",
"name",
"]",
"profile_name",
"=",
"clean",
"(",
"name",
")",
"class_name",
"=",
"'{}{}Profile'",
".",
"format",
"(",
"pr... | For the given profile name, load the data from the external
database, then generate dynamically a class. | [
"For",
"the",
"given",
"profile",
"name",
"load",
"the",
"data",
"from",
"the",
"external",
"database",
"then",
"generate",
"dynamically",
"a",
"class",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/capabilities.py#L113-L125 |
226,806 | python-escpos/python-escpos | src/escpos/capabilities.py | BaseProfile.get_font | def get_font(self, font):
"""Return the escpos index for `font`. Makes sure that
the requested `font` is valid.
"""
font = {'a': 0, 'b': 1}.get(font, font)
if not six.text_type(font) in self.fonts:
raise NotSupported(
'"{}" is not a valid font in the current profile'.format(font))
return font | python | def get_font(self, font):
font = {'a': 0, 'b': 1}.get(font, font)
if not six.text_type(font) in self.fonts:
raise NotSupported(
'"{}" is not a valid font in the current profile'.format(font))
return font | [
"def",
"get_font",
"(",
"self",
",",
"font",
")",
":",
"font",
"=",
"{",
"'a'",
":",
"0",
",",
"'b'",
":",
"1",
"}",
".",
"get",
"(",
"font",
",",
"font",
")",
"if",
"not",
"six",
".",
"text_type",
"(",
"font",
")",
"in",
"self",
".",
"fonts"... | Return the escpos index for `font`. Makes sure that
the requested `font` is valid. | [
"Return",
"the",
"escpos",
"index",
"for",
"font",
".",
"Makes",
"sure",
"that",
"the",
"requested",
"font",
"is",
"valid",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/capabilities.py#L72-L80 |
226,807 | python-escpos/python-escpos | src/escpos/capabilities.py | BaseProfile.get_columns | def get_columns(self, font):
""" Return the number of columns for the given font.
"""
font = self.get_font(font)
return self.fonts[six.text_type(font)]['columns'] | python | def get_columns(self, font):
font = self.get_font(font)
return self.fonts[six.text_type(font)]['columns'] | [
"def",
"get_columns",
"(",
"self",
",",
"font",
")",
":",
"font",
"=",
"self",
".",
"get_font",
"(",
"font",
")",
"return",
"self",
".",
"fonts",
"[",
"six",
".",
"text_type",
"(",
"font",
")",
"]",
"[",
"'columns'",
"]"
] | Return the number of columns for the given font. | [
"Return",
"the",
"number",
"of",
"columns",
"for",
"the",
"given",
"font",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/capabilities.py#L82-L86 |
226,808 | python-escpos/python-escpos | src/escpos/config.py | Config._reset_config | def _reset_config(self):
""" Clear the loaded configuration.
If we are loading a changed config, we don't want to have leftover
data.
"""
self._has_loaded = False
self._printer = None
self._printer_name = None
self._printer_config = None | python | def _reset_config(self):
self._has_loaded = False
self._printer = None
self._printer_name = None
self._printer_config = None | [
"def",
"_reset_config",
"(",
"self",
")",
":",
"self",
".",
"_has_loaded",
"=",
"False",
"self",
".",
"_printer",
"=",
"None",
"self",
".",
"_printer_name",
"=",
"None",
"self",
".",
"_printer_config",
"=",
"None"
] | Clear the loaded configuration.
If we are loading a changed config, we don't want to have leftover
data. | [
"Clear",
"the",
"loaded",
"configuration",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/config.py#L41-L51 |
226,809 | python-escpos/python-escpos | src/escpos/config.py | Config.load | def load(self, config_path=None):
""" Load and parse the configuration file using pyyaml
:param config_path: An optional file path, file handle, or byte string
for the configuration file.
"""
self._reset_config()
if not config_path:
config_path = os.path.join(
appdirs.user_config_dir(self._app_name),
self._config_file
)
try:
# First check if it's file like. If it is, pyyaml can load it.
# I'm checking type instead of catching exceptions to keep the
# exception handling simple
if hasattr(config_path, 'read'):
config = yaml.safe_load(config_path)
else:
# If it isn't, it's a path. We have to open it first, otherwise
# pyyaml will try to read it as yaml
with open(config_path, 'rb') as config_file:
config = yaml.safe_load(config_file)
except EnvironmentError:
raise exceptions.ConfigNotFoundError('Couldn\'t read config at {config_path}'.format(
config_path=str(config_path),
))
except yaml.YAMLError:
raise exceptions.ConfigSyntaxError('Error parsing YAML')
if 'printer' in config:
self._printer_config = config['printer']
self._printer_name = self._printer_config.pop('type').title()
if not self._printer_name or not hasattr(printer, self._printer_name):
raise exceptions.ConfigSyntaxError(
'Printer type "{printer_name}" is invalid'.format(
printer_name=self._printer_name,
)
)
self._has_loaded = True | python | def load(self, config_path=None):
self._reset_config()
if not config_path:
config_path = os.path.join(
appdirs.user_config_dir(self._app_name),
self._config_file
)
try:
# First check if it's file like. If it is, pyyaml can load it.
# I'm checking type instead of catching exceptions to keep the
# exception handling simple
if hasattr(config_path, 'read'):
config = yaml.safe_load(config_path)
else:
# If it isn't, it's a path. We have to open it first, otherwise
# pyyaml will try to read it as yaml
with open(config_path, 'rb') as config_file:
config = yaml.safe_load(config_file)
except EnvironmentError:
raise exceptions.ConfigNotFoundError('Couldn\'t read config at {config_path}'.format(
config_path=str(config_path),
))
except yaml.YAMLError:
raise exceptions.ConfigSyntaxError('Error parsing YAML')
if 'printer' in config:
self._printer_config = config['printer']
self._printer_name = self._printer_config.pop('type').title()
if not self._printer_name or not hasattr(printer, self._printer_name):
raise exceptions.ConfigSyntaxError(
'Printer type "{printer_name}" is invalid'.format(
printer_name=self._printer_name,
)
)
self._has_loaded = True | [
"def",
"load",
"(",
"self",
",",
"config_path",
"=",
"None",
")",
":",
"self",
".",
"_reset_config",
"(",
")",
"if",
"not",
"config_path",
":",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"appdirs",
".",
"user_config_dir",
"(",
"self",
"."... | Load and parse the configuration file using pyyaml
:param config_path: An optional file path, file handle, or byte string
for the configuration file. | [
"Load",
"and",
"parse",
"the",
"configuration",
"file",
"using",
"pyyaml"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/config.py#L53-L98 |
226,810 | python-escpos/python-escpos | src/escpos/config.py | Config.printer | def printer(self):
""" Returns a printer that was defined in the config, or throws an
exception.
This method loads the default config if one hasn't beeen already loaded.
"""
if not self._has_loaded:
self.load()
if not self._printer_name:
raise exceptions.ConfigSectionMissingError('printer')
if not self._printer:
# We could catch init errors and make them a ConfigSyntaxError,
# but I'll just let them pass
self._printer = getattr(printer, self._printer_name)(**self._printer_config)
return self._printer | python | def printer(self):
if not self._has_loaded:
self.load()
if not self._printer_name:
raise exceptions.ConfigSectionMissingError('printer')
if not self._printer:
# We could catch init errors and make them a ConfigSyntaxError,
# but I'll just let them pass
self._printer = getattr(printer, self._printer_name)(**self._printer_config)
return self._printer | [
"def",
"printer",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_has_loaded",
":",
"self",
".",
"load",
"(",
")",
"if",
"not",
"self",
".",
"_printer_name",
":",
"raise",
"exceptions",
".",
"ConfigSectionMissingError",
"(",
"'printer'",
")",
"if",
"n... | Returns a printer that was defined in the config, or throws an
exception.
This method loads the default config if one hasn't beeen already loaded. | [
"Returns",
"a",
"printer",
"that",
"was",
"defined",
"in",
"the",
"config",
"or",
"throws",
"an",
"exception",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/config.py#L100-L118 |
226,811 | python-escpos/python-escpos | src/escpos/printer.py | Usb.open | def open(self, usb_args):
""" Search device on USB tree and set it as escpos device.
:param usb_args: USB arguments
"""
self.device = usb.core.find(**usb_args)
if self.device is None:
raise USBNotFoundError("Device not found or cable not plugged in.")
self.idVendor = self.device.idVendor
self.idProduct = self.device.idProduct
check_driver = None
try:
check_driver = self.device.is_kernel_driver_active(0)
except NotImplementedError:
pass
if check_driver is None or check_driver:
try:
self.device.detach_kernel_driver(0)
except usb.core.USBError as e:
if check_driver is not None:
print("Could not detatch kernel driver: {0}".format(str(e)))
try:
self.device.set_configuration()
self.device.reset()
except usb.core.USBError as e:
print("Could not set configuration: {0}".format(str(e))) | python | def open(self, usb_args):
self.device = usb.core.find(**usb_args)
if self.device is None:
raise USBNotFoundError("Device not found or cable not plugged in.")
self.idVendor = self.device.idVendor
self.idProduct = self.device.idProduct
check_driver = None
try:
check_driver = self.device.is_kernel_driver_active(0)
except NotImplementedError:
pass
if check_driver is None or check_driver:
try:
self.device.detach_kernel_driver(0)
except usb.core.USBError as e:
if check_driver is not None:
print("Could not detatch kernel driver: {0}".format(str(e)))
try:
self.device.set_configuration()
self.device.reset()
except usb.core.USBError as e:
print("Could not set configuration: {0}".format(str(e))) | [
"def",
"open",
"(",
"self",
",",
"usb_args",
")",
":",
"self",
".",
"device",
"=",
"usb",
".",
"core",
".",
"find",
"(",
"*",
"*",
"usb_args",
")",
"if",
"self",
".",
"device",
"is",
"None",
":",
"raise",
"USBNotFoundError",
"(",
"\"Device not found or... | Search device on USB tree and set it as escpos device.
:param usb_args: USB arguments | [
"Search",
"device",
"on",
"USB",
"tree",
"and",
"set",
"it",
"as",
"escpos",
"device",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/printer.py#L59-L89 |
226,812 | python-escpos/python-escpos | src/escpos/printer.py | Usb.close | def close(self):
""" Release USB interface """
if self.device:
usb.util.dispose_resources(self.device)
self.device = None | python | def close(self):
if self.device:
usb.util.dispose_resources(self.device)
self.device = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"device",
":",
"usb",
".",
"util",
".",
"dispose_resources",
"(",
"self",
".",
"device",
")",
"self",
".",
"device",
"=",
"None"
] | Release USB interface | [
"Release",
"USB",
"interface"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/printer.py#L103-L107 |
226,813 | python-escpos/python-escpos | src/escpos/printer.py | Serial.close | def close(self):
""" Close Serial interface """
if self.device is not None and self.device.is_open:
self.device.flush()
self.device.close() | python | def close(self):
if self.device is not None and self.device.is_open:
self.device.flush()
self.device.close() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"device",
"is",
"not",
"None",
"and",
"self",
".",
"device",
".",
"is_open",
":",
"self",
".",
"device",
".",
"flush",
"(",
")",
"self",
".",
"device",
".",
"close",
"(",
")"
] | Close Serial interface | [
"Close",
"Serial",
"interface"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/printer.py#L174-L178 |
226,814 | python-escpos/python-escpos | src/escpos/printer.py | Network.open | def open(self):
""" Open TCP socket with ``socket``-library and set it as escpos device """
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.device.settimeout(self.timeout)
self.device.connect((self.host, self.port))
if self.device is None:
print("Could not open socket for {0}".format(self.host)) | python | def open(self):
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.device.settimeout(self.timeout)
self.device.connect((self.host, self.port))
if self.device is None:
print("Could not open socket for {0}".format(self.host)) | [
"def",
"open",
"(",
"self",
")",
":",
"self",
".",
"device",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"self",
".",
"device",
".",
"settimeout",
"(",
"self",
".",
"timeout",
")",
"self",
".",... | Open TCP socket with ``socket``-library and set it as escpos device | [
"Open",
"TCP",
"socket",
"with",
"socket",
"-",
"library",
"and",
"set",
"it",
"as",
"escpos",
"device"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/printer.py#L217-L224 |
226,815 | python-escpos/python-escpos | src/escpos/printer.py | File.open | def open(self):
""" Open system file """
self.device = open(self.devfile, "wb")
if self.device is None:
print("Could not open the specified file {0}".format(self.devfile)) | python | def open(self):
self.device = open(self.devfile, "wb")
if self.device is None:
print("Could not open the specified file {0}".format(self.devfile)) | [
"def",
"open",
"(",
"self",
")",
":",
"self",
".",
"device",
"=",
"open",
"(",
"self",
".",
"devfile",
",",
"\"wb\"",
")",
"if",
"self",
".",
"device",
"is",
"None",
":",
"print",
"(",
"\"Could not open the specified file {0}\"",
".",
"format",
"(",
"sel... | Open system file | [
"Open",
"system",
"file"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/printer.py#L266-L271 |
226,816 | python-escpos/python-escpos | src/escpos/katakana.py | encode_katakana | def encode_katakana(text):
"""I don't think this quite works yet."""
encoded = []
for char in text:
if jaconv:
# try to convert japanese text to half-katakanas
char = jaconv.z2h(jaconv.hira2kata(char))
# TODO: "the conversion may result in multiple characters"
# If that really can happen (I am not really shure), than the string would have to be split and every single
# character has to passed through the following lines.
if char in TXT_ENC_KATAKANA_MAP:
encoded.append(TXT_ENC_KATAKANA_MAP[char])
else:
# TODO doesn't this discard all that is not in the map? Can we be sure that the input does contain only
# encodable characters? We could at least throw an exception if encoding is not possible.
pass
return b"".join(encoded) | python | def encode_katakana(text):
encoded = []
for char in text:
if jaconv:
# try to convert japanese text to half-katakanas
char = jaconv.z2h(jaconv.hira2kata(char))
# TODO: "the conversion may result in multiple characters"
# If that really can happen (I am not really shure), than the string would have to be split and every single
# character has to passed through the following lines.
if char in TXT_ENC_KATAKANA_MAP:
encoded.append(TXT_ENC_KATAKANA_MAP[char])
else:
# TODO doesn't this discard all that is not in the map? Can we be sure that the input does contain only
# encodable characters? We could at least throw an exception if encoding is not possible.
pass
return b"".join(encoded) | [
"def",
"encode_katakana",
"(",
"text",
")",
":",
"encoded",
"=",
"[",
"]",
"for",
"char",
"in",
"text",
":",
"if",
"jaconv",
":",
"# try to convert japanese text to half-katakanas",
"char",
"=",
"jaconv",
".",
"z2h",
"(",
"jaconv",
".",
"hira2kata",
"(",
"ch... | I don't think this quite works yet. | [
"I",
"don",
"t",
"think",
"this",
"quite",
"works",
"yet",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/katakana.py#L19-L36 |
226,817 | python-escpos/python-escpos | src/escpos/image.py | EscposImage.to_column_format | def to_column_format(self, high_density_vertical=True):
"""
Extract slices of an image as equal-sized blobs of column-format data.
:param high_density_vertical: Printed line height in dots
"""
im = self._im.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)
line_height = 24 if high_density_vertical else 8
width_pixels, height_pixels = im.size
top = 0
left = 0
while left < width_pixels:
box = (left, top, left + line_height, top + height_pixels)
im_slice = im.transform((line_height, height_pixels), Image.EXTENT, box)
im_bytes = im_slice.tobytes()
yield(im_bytes)
left += line_height | python | def to_column_format(self, high_density_vertical=True):
im = self._im.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)
line_height = 24 if high_density_vertical else 8
width_pixels, height_pixels = im.size
top = 0
left = 0
while left < width_pixels:
box = (left, top, left + line_height, top + height_pixels)
im_slice = im.transform((line_height, height_pixels), Image.EXTENT, box)
im_bytes = im_slice.tobytes()
yield(im_bytes)
left += line_height | [
"def",
"to_column_format",
"(",
"self",
",",
"high_density_vertical",
"=",
"True",
")",
":",
"im",
"=",
"self",
".",
"_im",
".",
"transpose",
"(",
"Image",
".",
"ROTATE_270",
")",
".",
"transpose",
"(",
"Image",
".",
"FLIP_LEFT_RIGHT",
")",
"line_height",
... | Extract slices of an image as equal-sized blobs of column-format data.
:param high_density_vertical: Printed line height in dots | [
"Extract",
"slices",
"of",
"an",
"image",
"as",
"equal",
"-",
"sized",
"blobs",
"of",
"column",
"-",
"format",
"data",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/image.py#L77-L93 |
226,818 | python-escpos/python-escpos | src/escpos/image.py | EscposImage.split | def split(self, fragment_height):
"""
Split an image into multiple fragments after fragment_height pixels
:param fragment_height: height of fragment
:return: list of PIL objects
"""
passes = int(math.ceil(self.height/fragment_height))
fragments = []
for n in range(0, passes):
left = 0
right = self.width
upper = n * fragment_height
lower = min((n + 1) * fragment_height, self.height)
box = (left, upper, right, lower)
fragments.append(self.img_original.crop(box))
return fragments | python | def split(self, fragment_height):
passes = int(math.ceil(self.height/fragment_height))
fragments = []
for n in range(0, passes):
left = 0
right = self.width
upper = n * fragment_height
lower = min((n + 1) * fragment_height, self.height)
box = (left, upper, right, lower)
fragments.append(self.img_original.crop(box))
return fragments | [
"def",
"split",
"(",
"self",
",",
"fragment_height",
")",
":",
"passes",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"self",
".",
"height",
"/",
"fragment_height",
")",
")",
"fragments",
"=",
"[",
"]",
"for",
"n",
"in",
"range",
"(",
"0",
",",
"pass... | Split an image into multiple fragments after fragment_height pixels
:param fragment_height: height of fragment
:return: list of PIL objects | [
"Split",
"an",
"image",
"into",
"multiple",
"fragments",
"after",
"fragment_height",
"pixels"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/image.py#L101-L117 |
226,819 | python-escpos/python-escpos | src/escpos/image.py | EscposImage.center | def center(self, max_width):
"""In-place image centering
:param: Maximum width in order to deduce x offset for centering
:return: None
"""
old_width, height = self._im.size
new_size = (max_width, height)
new_im = Image.new("1", new_size)
paste_x = int((max_width - old_width) / 2)
new_im.paste(self._im, (paste_x, 0))
self._im = new_im | python | def center(self, max_width):
old_width, height = self._im.size
new_size = (max_width, height)
new_im = Image.new("1", new_size)
paste_x = int((max_width - old_width) / 2)
new_im.paste(self._im, (paste_x, 0))
self._im = new_im | [
"def",
"center",
"(",
"self",
",",
"max_width",
")",
":",
"old_width",
",",
"height",
"=",
"self",
".",
"_im",
".",
"size",
"new_size",
"=",
"(",
"max_width",
",",
"height",
")",
"new_im",
"=",
"Image",
".",
"new",
"(",
"\"1\"",
",",
"new_size",
")",... | In-place image centering
:param: Maximum width in order to deduce x offset for centering
:return: None | [
"In",
"-",
"place",
"image",
"centering"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/image.py#L119-L133 |
226,820 | python-escpos/python-escpos | src/escpos/cli.py | demo | def demo(printer, **kwargs):
"""
Prints specificed demos. Called when CLI is passed `demo`. This function
uses the DEMO_FUNCTIONS dictionary.
:param printer: A printer from escpos.printer
:param kwargs: A dict with a key for each function you want to test. It's
in this format since it usually comes from argparse.
"""
for demo_choice in kwargs.keys():
command = getattr(
printer,
demo_choice
.replace('barcodes_a', 'barcode')
.replace('barcodes_b', 'barcode')
)
for params in DEMO_FUNCTIONS[demo_choice]:
command(**params)
printer.cut() | python | def demo(printer, **kwargs):
for demo_choice in kwargs.keys():
command = getattr(
printer,
demo_choice
.replace('barcodes_a', 'barcode')
.replace('barcodes_b', 'barcode')
)
for params in DEMO_FUNCTIONS[demo_choice]:
command(**params)
printer.cut() | [
"def",
"demo",
"(",
"printer",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"demo_choice",
"in",
"kwargs",
".",
"keys",
"(",
")",
":",
"command",
"=",
"getattr",
"(",
"printer",
",",
"demo_choice",
".",
"replace",
"(",
"'barcodes_a'",
",",
"'barcode'",
")... | Prints specificed demos. Called when CLI is passed `demo`. This function
uses the DEMO_FUNCTIONS dictionary.
:param printer: A printer from escpos.printer
:param kwargs: A dict with a key for each function you want to test. It's
in this format since it usually comes from argparse. | [
"Prints",
"specificed",
"demos",
".",
"Called",
"when",
"CLI",
"is",
"passed",
"demo",
".",
"This",
"function",
"uses",
"the",
"DEMO_FUNCTIONS",
"dictionary",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/cli.py#L559-L577 |
226,821 | python-escpos/python-escpos | src/escpos/magicencode.py | Encoder.get_encoding_name | def get_encoding_name(self, encoding):
"""Given an encoding provided by the user, will return a
canonical encoding name; and also validate that the encoding
is supported.
TODO: Support encoding aliases: pc437 instead of cp437.
"""
encoding = CodePages.get_encoding_name(encoding)
if encoding not in self.codepages:
raise ValueError((
'Encoding "{}" cannot be used for the current profile. '
'Valid encodings are: {}'
).format(encoding, ','.join(self.codepages.keys())))
return encoding | python | def get_encoding_name(self, encoding):
encoding = CodePages.get_encoding_name(encoding)
if encoding not in self.codepages:
raise ValueError((
'Encoding "{}" cannot be used for the current profile. '
'Valid encodings are: {}'
).format(encoding, ','.join(self.codepages.keys())))
return encoding | [
"def",
"get_encoding_name",
"(",
"self",
",",
"encoding",
")",
":",
"encoding",
"=",
"CodePages",
".",
"get_encoding_name",
"(",
"encoding",
")",
"if",
"encoding",
"not",
"in",
"self",
".",
"codepages",
":",
"raise",
"ValueError",
"(",
"(",
"'Encoding \"{}\" c... | Given an encoding provided by the user, will return a
canonical encoding name; and also validate that the encoding
is supported.
TODO: Support encoding aliases: pc437 instead of cp437. | [
"Given",
"an",
"encoding",
"provided",
"by",
"the",
"user",
"will",
"return",
"a",
"canonical",
"encoding",
"name",
";",
"and",
"also",
"validate",
"that",
"the",
"encoding",
"is",
"supported",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L53-L66 |
def _get_codepage_char_list(encoding):
    """Return characters 128-255 of a code page as a 128-element list.

    :param encoding: name of the encoding; must be known to CodePages
    :raises LookupError: if the code page carries no usable character data
    """
    codepage = CodePages.get_encoding(encoding)
    if 'data' in codepage:
        chars = list("".join(codepage['data']))
        assert len(chars) == 128
        return chars
    if 'python_encode' in codepage:
        chars = [u" "] * 128
        codec = codepage['python_encode']
        for offset in range(128):
            try:
                chars[offset] = bytes([offset + 128]).decode(codec)
            except UnicodeDecodeError:
                # Leave a blank for codepoints this codec cannot decode.
                pass
        return chars
    raise LookupError("Can't find a known encoding for {}".format(encoding))
codepage = CodePages.get_encoding(encoding)
if 'data' in codepage:
encodable_chars = list("".join(codepage['data']))
assert(len(encodable_chars) == 128)
return encodable_chars
elif 'python_encode' in codepage:
encodable_chars = [u" "] * 128
for i in range(0, 128):
codepoint = i + 128
try:
encodable_chars[i] = bytes([codepoint]).decode(codepage['python_encode'])
except UnicodeDecodeError:
# Non-encodable character, just skip it
pass
return encodable_chars
raise LookupError("Can't find a known encoding for {}".format(encoding)) | [
"def",
"_get_codepage_char_list",
"(",
"encoding",
")",
":",
"codepage",
"=",
"CodePages",
".",
"get_encoding",
"(",
"encoding",
")",
"if",
"'data'",
"in",
"codepage",
":",
"encodable_chars",
"=",
"list",
"(",
"\"\"",
".",
"join",
"(",
"codepage",
"[",
"'dat... | Get codepage character list
Gets characters 128-255 for a given code page, as an array.
:param encoding: The name of the encoding. This must appear in the CodePage list | [
"Get",
"codepage",
"character",
"list"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L69-L91 |
def _get_codepage_char_map(self, encoding):
    """Return a map of unicode character -> code point for *encoding*.

    The map is built once per encoding and served from
    ``self.available_characters`` afterwards.
    """
    cached = self.available_characters.get(encoding)
    if cached is not None:
        return cached
    char_map = {char: idx + 128
                for idx, char in enumerate(self._get_codepage_char_list(encoding))}
    self.available_characters[encoding] = char_map
    return char_map
# Skip things that were loaded previously
if encoding in self.available_characters:
return self.available_characters[encoding]
codepage_char_list = self._get_codepage_char_list(encoding)
codepage_char_map = dict((utf8, i + 128) for (i, utf8) in enumerate(codepage_char_list))
self.available_characters[encoding] = codepage_char_map
return codepage_char_map | [
"def",
"_get_codepage_char_map",
"(",
"self",
",",
"encoding",
")",
":",
"# Skip things that were loaded previously",
"if",
"encoding",
"in",
"self",
".",
"available_characters",
":",
"return",
"self",
".",
"available_characters",
"[",
"encoding",
"]",
"codepage_char_li... | Get codepage character map
Process an encoding and return a map of UTF-characters to code points
in this encoding.
This is generated once only, and returned from a cache.
:param encoding: The name of the encoding. | [
"Get",
"codepage",
"character",
"map"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L93-L109 |
def can_encode(self, encoding, char):
    """Return True if *char* is representable in code page *encoding*.

    ASCII characters (code point below 128) are always considered
    encodable; unknown code pages encode nothing.
    """
    try:
        char_map = self._get_codepage_char_map(encoding)
    except LookupError:
        return False
    return ord(char) < 128 or char in char_map
available_map = {}
try:
available_map = self._get_codepage_char_map(encoding)
except LookupError:
return False
# Decide whether this character is encodeable in this code page
is_ascii = ord(char) < 128
is_encodable = char in available_map
return is_ascii or is_encodable | [
"def",
"can_encode",
"(",
"self",
",",
"encoding",
",",
"char",
")",
":",
"available_map",
"=",
"{",
"}",
"try",
":",
"available_map",
"=",
"self",
".",
"_get_codepage_char_map",
"(",
"encoding",
")",
"except",
"LookupError",
":",
"return",
"False",
"# Decid... | Determine if a character is encodeable in the given code page.
:param encoding: The name of the encoding.
:param char: The character to attempt to encode. | [
"Determine",
"if",
"a",
"character",
"is",
"encodeable",
"in",
"the",
"given",
"code",
"page",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L111-L126 |
226,825 | python-escpos/python-escpos | src/escpos/magicencode.py | Encoder._encode_char | def _encode_char(char, charmap, defaultchar):
""" Encode a single character with the given encoding map
:param char: char to encode
:param charmap: dictionary for mapping characters in this code page
"""
if ord(char) < 128:
return ord(char)
if char in charmap:
return charmap[char]
return ord(defaultchar) | python | def _encode_char(char, charmap, defaultchar):
if ord(char) < 128:
return ord(char)
if char in charmap:
return charmap[char]
return ord(defaultchar) | [
"def",
"_encode_char",
"(",
"char",
",",
"charmap",
",",
"defaultchar",
")",
":",
"if",
"ord",
"(",
"char",
")",
"<",
"128",
":",
"return",
"ord",
"(",
"char",
")",
"if",
"char",
"in",
"charmap",
":",
"return",
"charmap",
"[",
"char",
"]",
"return",
... | Encode a single character with the given encoding map
:param char: char to encode
:param charmap: dictionary for mapping characters in this code page | [
"Encode",
"a",
"single",
"character",
"with",
"the",
"given",
"encoding",
"map"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L129-L139 |
def encode(self, text, encoding, defaultchar='?'):
    """Encode *text* to bytes under *encoding*.

    :param text: text to encode
    :param encoding: encoding name (must be defined in capabilities)
    :param defaultchar: fallback for characters without a mapping
    """
    char_map = self._get_codepage_char_map(encoding)
    return bytes(self._encode_char(ch, char_map, defaultchar) for ch in text)
codepage_char_map = self._get_codepage_char_map(encoding)
output_bytes = bytes([self._encode_char(char, codepage_char_map, defaultchar) for char in text])
return output_bytes | [
"def",
"encode",
"(",
"self",
",",
"text",
",",
"encoding",
",",
"defaultchar",
"=",
"'?'",
")",
":",
"codepage_char_map",
"=",
"self",
".",
"_get_codepage_char_map",
"(",
"encoding",
")",
"output_bytes",
"=",
"bytes",
"(",
"[",
"self",
".",
"_encode_char",
... | Encode text under the given encoding
:param text: Text to encode
:param encoding: Encoding name to use (must be defined in capabilities)
:param defaultchar: Fallback for non-encodable characters | [
"Encode",
"text",
"under",
"the",
"given",
"encoding"
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L141-L150 |
def force_encoding(self, encoding):
    """Pin the buffer to a fixed encoding; the change is emitted right away.

    From now on the buffer will no longer switch code pages on its own,
    though it still tracks the current one.  Pass a falsy value to
    re-enable automatic switching.
    """
    if encoding:
        self.write_with_encoding(encoding, None)
        self.disabled = True
    else:
        self.disabled = False
if not encoding:
self.disabled = False
else:
self.write_with_encoding(encoding, None)
self.disabled = True | [
"def",
"force_encoding",
"(",
"self",
",",
"encoding",
")",
":",
"if",
"not",
"encoding",
":",
"self",
".",
"disabled",
"=",
"False",
"else",
":",
"self",
".",
"write_with_encoding",
"(",
"encoding",
",",
"None",
")",
"self",
".",
"disabled",
"=",
"True"... | Sets a fixed encoding. The change is emitted right away.
From now one, this buffer will switch the code page anymore.
However, it will still keep track of the current code page. | [
"Sets",
"a",
"fixed",
"encoding",
".",
"The",
"change",
"is",
"emitted",
"right",
"away",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L235-L245 |
def write(self, text):
    """Write *text*, switching code pages automatically as needed."""
    if self.disabled:
        # A fixed encoding was forced: write everything through it.
        self.write_with_encoding(self.encoding, text)
        return
    # Emit the longest prefix representable in the current encoding.
    head, remainder = split_writable_text(self.encoder, text, self.encoding)
    if head:
        self.write_with_encoding(self.encoding, head)
    while remainder:
        # Ask the profile for a code page covering the next character.
        candidate = self.encoder.find_suitable_encoding(remainder[0])
        if not candidate:
            self._handle_character_failed(remainder[0])
            remainder = remainder[1:]
            continue
        # Consume as much text as that code page can represent.
        head, remainder = split_writable_text(self.encoder, remainder, candidate)
        if head:
            self.write_with_encoding(candidate, head)
if self.disabled:
self.write_with_encoding(self.encoding, text)
return
# See how far we can go into the text with the current encoding
to_write, text = split_writable_text(self.encoder, text, self.encoding)
if to_write:
self.write_with_encoding(self.encoding, to_write)
while text:
# See if any of the code pages that the printer profile
# supports can encode this character.
encoding = self.encoder.find_suitable_encoding(text[0])
if not encoding:
self._handle_character_failed(text[0])
text = text[1:]
continue
# Write as much text as possible with the encoding found.
to_write, text = split_writable_text(self.encoder, text, encoding)
if to_write:
self.write_with_encoding(encoding, to_write) | [
"def",
"write",
"(",
"self",
",",
"text",
")",
":",
"if",
"self",
".",
"disabled",
":",
"self",
".",
"write_with_encoding",
"(",
"self",
".",
"encoding",
",",
"text",
")",
"return",
"# See how far we can go into the text with the current encoding",
"to_write",
","... | Write the text, automatically switching encodings. | [
"Write",
"the",
"text",
"automatically",
"switching",
"encodings",
"."
] | 52719c0b7de8948fabdffd180a2d71c22cf4c02b | https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/magicencode.py#L247-L272 |
def replace_media(self, src_file, dst_file):
    """Register a media substitution: the docx media matching *src_file*
    is replaced by the content of *dst_file*.

    This exists mainly because images cannot be added to docx
    headers/footers directly: put a dummy picture there, then map it to
    the real one with this call.
    Note: for images, the aspect ratio of the replaced image is kept.
    Note: the source media file must be available locally, as its CRC is
    used to locate it inside the docx.
    """
    with open(dst_file, 'rb') as handle:
        src_crc = self.get_file_crc(src_file)
        self.crc_to_new_media[src_crc] = handle.read()
with open(dst_file, 'rb') as fh:
crc = self.get_file_crc(src_file)
self.crc_to_new_media[crc] = fh.read() | [
"def",
"replace_media",
"(",
"self",
",",
"src_file",
",",
"dst_file",
")",
":",
"with",
"open",
"(",
"dst_file",
",",
"'rb'",
")",
"as",
"fh",
":",
"crc",
"=",
"self",
".",
"get_file_crc",
"(",
"src_file",
")",
"self",
".",
"crc_to_new_media",
"[",
"c... | Replace one media by another one into a docx
This has been done mainly because it is not possible to add images in
docx header/footer.
With this function, put a dummy picture in your header/footer,
then specify it with its replacement in this function
Syntax: tpl.replace_media('dummy_media_to_replace.png','media_to_paste.jpg')
Note: for images, the aspect ratio will be the same as the replaced image
Note2 : it is important to have the source media file as it is required
to calculate its CRC to find them in the docx | [
"Replace",
"one",
"media",
"by",
"another",
"one",
"into",
"a",
"docx"
] | 3291da7ff3ae66475eaca59af2c02ade40a1fd44 | https://github.com/elapouya/python-docx-template/blob/3291da7ff3ae66475eaca59af2c02ade40a1fd44/docxtpl/__init__.py#L376-L392 |
def replace_embedded(self, src_file, dst_file):
    """Register an embedded-object substitution: the docx object matching
    *src_file* is replaced by the content of *dst_file*.

    This exists mainly because objects cannot be added to docx
    headers/footers directly: put a dummy object there, then map it to
    the real one with this call.
    Note: the source file must be available locally, as its CRC is used
    to locate it inside the docx.
    """
    with open(dst_file, 'rb') as handle:
        src_crc = self.get_file_crc(src_file)
        self.crc_to_new_embedded[src_crc] = handle.read()
with open(dst_file, 'rb') as fh:
crc = self.get_file_crc(src_file)
self.crc_to_new_embedded[crc] = fh.read() | [
"def",
"replace_embedded",
"(",
"self",
",",
"src_file",
",",
"dst_file",
")",
":",
"with",
"open",
"(",
"dst_file",
",",
"'rb'",
")",
"as",
"fh",
":",
"crc",
"=",
"self",
".",
"get_file_crc",
"(",
"src_file",
")",
"self",
".",
"crc_to_new_embedded",
"["... | Replace one embdded object by another one into a docx
This has been done mainly because it is not possible to add images
in docx header/footer.
With this function, put a dummy picture in your header/footer,
then specify it with its replacement in this function
Syntax: tpl.replace_embedded('dummy_doc.docx','doc_to_paste.docx')
Note2 : it is important to have the source file as it is required to
calculate its CRC to find them in the docx | [
"Replace",
"one",
"embdded",
"object",
"by",
"another",
"one",
"into",
"a",
"docx"
] | 3291da7ff3ae66475eaca59af2c02ade40a1fd44 | https://github.com/elapouya/python-docx-template/blob/3291da7ff3ae66475eaca59af2c02ade40a1fd44/docxtpl/__init__.py#L422-L437 |
def build_pic_map(self):
    """Collect every picture referenced by the template into self.pic_map."""
    if not self.pic_to_replace:
        return
    # Pictures of the main document part.
    self.pic_map.update(self._img_filename_to_part(self.docx.part))
    # Pictures referenced from header and footer parts.
    for _relid, rel in six.iteritems(self.docx.part.rels):
        if rel.reltype in (REL_TYPE.HEADER, REL_TYPE.FOOTER):
            self.pic_map.update(self._img_filename_to_part(rel.target_part))
if self.pic_to_replace:
# Main document
part=self.docx.part
self.pic_map.update(self._img_filename_to_part(part))
# Header/Footer
for relid, rel in six.iteritems(self.docx.part.rels):
if rel.reltype in (REL_TYPE.HEADER,REL_TYPE.FOOTER):
self.pic_map.update(self._img_filename_to_part(rel.target_part)) | [
"def",
"build_pic_map",
"(",
"self",
")",
":",
"if",
"self",
".",
"pic_to_replace",
":",
"# Main document",
"part",
"=",
"self",
".",
"docx",
".",
"part",
"self",
".",
"pic_map",
".",
"update",
"(",
"self",
".",
"_img_filename_to_part",
"(",
"part",
")",
... | Searches in docx template all the xml pictures tag and store them
in pic_map dict | [
"Searches",
"in",
"docx",
"template",
"all",
"the",
"xml",
"pictures",
"tag",
"and",
"store",
"them",
"in",
"pic_map",
"dict"
] | 3291da7ff3ae66475eaca59af2c02ade40a1fd44 | https://github.com/elapouya/python-docx-template/blob/3291da7ff3ae66475eaca59af2c02ade40a1fd44/docxtpl/__init__.py#L471-L482 |
def create_perm(self, using=None, *args, **kwargs):
    """Ensure the fake ``constance.config`` content type and its
    ``change_config`` permission exist, so permission checks can run.
    """
    from django.conf import settings
    from django.contrib.auth.models import Permission
    from django.contrib.contenttypes.models import ContentType
    constance_dbs = getattr(settings, 'CONSTANCE_DBS', None)
    if constance_dbs is not None and using not in constance_dbs:
        # This database is explicitly excluded from constance.
        return
    if not (ContentType._meta.installed and Permission._meta.installed):
        return
    content_type, _created = ContentType.objects.using(using).get_or_create(
        app_label='constance',
        model='config',
    )
    Permission.objects.using(using).get_or_create(
        content_type=content_type,
        codename='change_config',
        defaults={'name': 'Can change config'},
    )
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
constance_dbs = getattr(settings, 'CONSTANCE_DBS', None)
if constance_dbs is not None and using not in constance_dbs:
return
if ContentType._meta.installed and Permission._meta.installed:
content_type, created = ContentType.objects.using(using).get_or_create(
app_label='constance',
model='config',
)
permission, created = Permission.objects.using(using).get_or_create(
content_type=content_type,
codename='change_config',
defaults={'name': 'Can change config'}) | [
"def",
"create_perm",
"(",
"self",
",",
"using",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"django",
".",
"conf",
"import",
"settings",
"from",
"django",
".",
"contrib",
".",
"auth",
".",
"models",
"import",
"Permission"... | Creates a fake content type and permission
to be able to check for permissions | [
"Creates",
"a",
"fake",
"content",
"type",
"and",
"permission",
"to",
"be",
"able",
"to",
"check",
"for",
"permissions"
] | 2948dff3cc7a5bcab3f996e7da89af18700de3cc | https://github.com/jazzband/django-constance/blob/2948dff3cc7a5bcab3f996e7da89af18700de3cc/constance/apps.py#L14-L35 |
def check_fieldsets(*args, **kwargs):
    """Django system check warning when CONFIG_FIELDSETS does not cover
    every key declared in settings.CONFIG.
    """
    fieldsets = getattr(settings, "CONFIG_FIELDSETS", None)
    if fieldsets:
        missing = get_inconsistent_fieldnames()
        if missing:
            warning = checks.Warning(
                _(
                    "CONSTANCE_CONFIG_FIELDSETS is missing "
                    "field(s) that exists in CONSTANCE_CONFIG."
                ),
                hint=", ".join(sorted(missing)),
                obj="settings.CONSTANCE_CONFIG",
                id="constance.E001",
            )
            return [warning]
    return []
if hasattr(settings, "CONFIG_FIELDSETS") and settings.CONFIG_FIELDSETS:
inconsistent_fieldnames = get_inconsistent_fieldnames()
if inconsistent_fieldnames:
return [
checks.Warning(
_(
"CONSTANCE_CONFIG_FIELDSETS is missing "
"field(s) that exists in CONSTANCE_CONFIG."
),
hint=", ".join(sorted(inconsistent_fieldnames)),
obj="settings.CONSTANCE_CONFIG",
id="constance.E001",
)
]
return [] | [
"def",
"check_fieldsets",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"\"CONFIG_FIELDSETS\"",
")",
"and",
"settings",
".",
"CONFIG_FIELDSETS",
":",
"inconsistent_fieldnames",
"=",
"get_inconsistent_fieldnames",
"(",
... | A Django system check to make sure that, if defined, CONFIG_FIELDSETS accounts for
every entry in settings.CONFIG. | [
"A",
"Django",
"system",
"check",
"to",
"make",
"sure",
"that",
"if",
"defined",
"CONFIG_FIELDSETS",
"accounts",
"for",
"every",
"entry",
"in",
"settings",
".",
"CONFIG",
"."
] | 2948dff3cc7a5bcab3f996e7da89af18700de3cc | https://github.com/jazzband/django-constance/blob/2948dff3cc7a5bcab3f996e7da89af18700de3cc/constance/checks.py#L8-L27 |
def get_inconsistent_fieldnames():
    """Return the set of keys from settings.CONFIG that are missing from
    settings.CONFIG_FIELDSETS.

    Returns an empty set both when every key is covered and when
    CONFIG_FIELDSETS declares no field names at all.
    """
    declared = {
        field_name
        for fields_list in settings.CONFIG_FIELDSETS.values()
        for field_name in fields_list
    }
    if not declared:
        # Bug fix: this previously returned {} — an empty *dict* — while
        # the docstring (and the other return path) promise a set.  Both
        # are falsy, so callers doing truthiness checks are unaffected.
        return set()
    # The redundant outer set() wrapper from the original is dropped:
    # set difference already yields a set.
    return set(settings.CONFIG.keys()) - declared
field_name_list = []
for fieldset_title, fields_list in settings.CONFIG_FIELDSETS.items():
for field_name in fields_list:
field_name_list.append(field_name)
if not field_name_list:
return {}
return set(set(settings.CONFIG.keys()) - set(field_name_list)) | [
"def",
"get_inconsistent_fieldnames",
"(",
")",
":",
"field_name_list",
"=",
"[",
"]",
"for",
"fieldset_title",
",",
"fields_list",
"in",
"settings",
".",
"CONFIG_FIELDSETS",
".",
"items",
"(",
")",
":",
"for",
"field_name",
"in",
"fields_list",
":",
"field_name... | Returns a set of keys from settings.CONFIG that are not accounted for in
settings.CONFIG_FIELDSETS.
If there are no fieldnames in settings.CONFIG_FIELDSETS, returns an empty set. | [
"Returns",
"a",
"set",
"of",
"keys",
"from",
"settings",
".",
"CONFIG",
"that",
"are",
"not",
"accounted",
"for",
"in",
"settings",
".",
"CONFIG_FIELDSETS",
".",
"If",
"there",
"are",
"no",
"fieldnames",
"in",
"settings",
".",
"CONFIG_FIELDSETS",
"returns",
... | 2948dff3cc7a5bcab3f996e7da89af18700de3cc | https://github.com/jazzband/django-constance/blob/2948dff3cc7a5bcab3f996e7da89af18700de3cc/constance/checks.py#L30-L42 |
def send_message(
        self,
        title=None,
        body=None,
        icon=None,
        data=None,
        sound=None,
        badge=None,
        api_key=None,
        **kwargs):
    """Send a notification to every active device in the queryset and
    deactivate devices whose registration ids are reported invalid
    (subject to the DELETE_INACTIVE_DEVICES setting).
    """
    if not self:
        return
    from .fcm import fcm_send_bulk_message
    ids = list(
        self.filter(active=True).values_list('registration_id', flat=True))
    if not ids:
        # No active devices: report everything as a failure.
        return [{'failure': len(self), 'success': 0}]
    result = fcm_send_bulk_message(
        registration_ids=ids,
        title=title,
        body=body,
        icon=icon,
        data=data,
        sound=sound,
        badge=badge,
        api_key=api_key,
        **kwargs
    )
    self._deactivate_devices_with_error_results(ids, result['results'])
    return result
self,
title=None,
body=None,
icon=None,
data=None,
sound=None,
badge=None,
api_key=None,
**kwargs):
if self:
from .fcm import fcm_send_bulk_message
registration_ids = list(self.filter(active=True).values_list(
'registration_id',
flat=True
))
if len(registration_ids) == 0:
return [{'failure': len(self), 'success': 0}]
result = fcm_send_bulk_message(
registration_ids=registration_ids,
title=title,
body=body,
icon=icon,
data=data,
sound=sound,
badge=badge,
api_key=api_key,
**kwargs
)
self._deactivate_devices_with_error_results(
registration_ids,
result['results']
)
return result | [
"def",
"send_message",
"(",
"self",
",",
"title",
"=",
"None",
",",
"body",
"=",
"None",
",",
"icon",
"=",
"None",
",",
"data",
"=",
"None",
",",
"sound",
"=",
"None",
",",
"badge",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"*",
"*",
"kwargs... | Send notification for all active devices in queryset and deactivate if
DELETE_INACTIVE_DEVICES setting is set to True. | [
"Send",
"notification",
"for",
"all",
"active",
"devices",
"in",
"queryset",
"and",
"deactivate",
"if",
"DELETE_INACTIVE_DEVICES",
"setting",
"is",
"set",
"to",
"True",
"."
] | 8480d1cf935bfb28e2ad6d86a0abf923c2ecb266 | https://github.com/xtrinch/fcm-django/blob/8480d1cf935bfb28e2ad6d86a0abf923c2ecb266/fcm_django/models.py#L44-L84 |
def send_data_message(
        self,
        api_key=None,
        condition=None,
        collapse_key=None,
        delay_while_idle=False,
        time_to_live=None,
        restricted_package_name=None,
        low_priority=False,
        dry_run=False,
        data_message=None,
        content_available=None,
        timeout=5,
        json_encoder=None):
    """Send a data message to every active device in the queryset and
    deactivate devices whose registration ids are reported invalid
    (subject to the DELETE_INACTIVE_DEVICES setting).
    """
    if not self:
        return
    from .fcm import fcm_send_bulk_data_messages
    ids = list(
        self.filter(active=True).values_list('registration_id', flat=True))
    if not ids:
        # No active devices: report everything as a failure.
        return [{'failure': len(self), 'success': 0}]
    result = fcm_send_bulk_data_messages(
        api_key=api_key,
        registration_ids=ids,
        condition=condition,
        collapse_key=collapse_key,
        delay_while_idle=delay_while_idle,
        time_to_live=time_to_live,
        restricted_package_name=restricted_package_name,
        low_priority=low_priority,
        dry_run=dry_run,
        data_message=data_message,
        content_available=content_available,
        timeout=timeout,
        json_encoder=json_encoder,
    )
    self._deactivate_devices_with_error_results(ids, result['results'])
    return result
self,
api_key=None,
condition=None,
collapse_key=None,
delay_while_idle=False,
time_to_live=None,
restricted_package_name=None,
low_priority=False,
dry_run=False,
data_message=None,
content_available=None,
timeout=5,
json_encoder=None):
if self:
from .fcm import fcm_send_bulk_data_messages
registration_ids = list(self.filter(active=True).values_list(
'registration_id',
flat=True
))
if len(registration_ids) == 0:
return [{'failure': len(self), 'success': 0}]
result = fcm_send_bulk_data_messages(
api_key=api_key,
registration_ids=registration_ids,
condition=condition,
collapse_key=collapse_key,
delay_while_idle=delay_while_idle,
time_to_live=time_to_live,
restricted_package_name=restricted_package_name,
low_priority=low_priority,
dry_run=dry_run,
data_message=data_message,
content_available=content_available,
timeout=timeout,
json_encoder=json_encoder,
)
self._deactivate_devices_with_error_results(
registration_ids,
result['results']
)
return result | [
"def",
"send_data_message",
"(",
"self",
",",
"api_key",
"=",
"None",
",",
"condition",
"=",
"None",
",",
"collapse_key",
"=",
"None",
",",
"delay_while_idle",
"=",
"False",
",",
"time_to_live",
"=",
"None",
",",
"restricted_package_name",
"=",
"None",
",",
... | Send data messages for all active devices in queryset and deactivate if
DELETE_INACTIVE_DEVICES setting is set to True. | [
"Send",
"data",
"messages",
"for",
"all",
"active",
"devices",
"in",
"queryset",
"and",
"deactivate",
"if",
"DELETE_INACTIVE_DEVICES",
"setting",
"is",
"set",
"to",
"True",
"."
] | 8480d1cf935bfb28e2ad6d86a0abf923c2ecb266 | https://github.com/xtrinch/fcm-django/blob/8480d1cf935bfb28e2ad6d86a0abf923c2ecb266/fcm_django/models.py#L86-L135 |
def send_message(
        self,
        title=None,
        body=None,
        icon=None,
        data=None,
        sound=None,
        badge=None,
        api_key=None,
        **kwargs):
    """Send a single notification to this device, deactivating it when the
    send result reports the registration id as invalid.
    """
    from .fcm import fcm_send_message
    outcome = fcm_send_message(
        registration_id=str(self.registration_id),
        title=title,
        body=body,
        icon=icon,
        data=data,
        sound=sound,
        badge=badge,
        api_key=api_key,
        **kwargs
    )
    self._deactivate_device_on_error_result(outcome)
    return outcome
self,
title=None,
body=None,
icon=None,
data=None,
sound=None,
badge=None,
api_key=None,
**kwargs):
from .fcm import fcm_send_message
result = fcm_send_message(
registration_id=str(self.registration_id),
title=title,
body=body,
icon=icon,
data=data,
sound=sound,
badge=badge,
api_key=api_key,
**kwargs
)
self._deactivate_device_on_error_result(result)
return result | [
"def",
"send_message",
"(",
"self",
",",
"title",
"=",
"None",
",",
"body",
"=",
"None",
",",
"icon",
"=",
"None",
",",
"data",
"=",
"None",
",",
"sound",
"=",
"None",
",",
"badge",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"*",
"*",
"kwargs... | Send single notification message. | [
"Send",
"single",
"notification",
"message",
"."
] | 8480d1cf935bfb28e2ad6d86a0abf923c2ecb266 | https://github.com/xtrinch/fcm-django/blob/8480d1cf935bfb28e2ad6d86a0abf923c2ecb266/fcm_django/models.py#L173-L200 |
def send_data_message(
        self,
        condition=None,
        collapse_key=None,
        delay_while_idle=False,
        time_to_live=None,
        restricted_package_name=None,
        low_priority=False,
        dry_run=False,
        data_message=None,
        content_available=None,
        api_key=None,
        timeout=5,
        json_encoder=None):
    """Send a single data message to this device, deactivating it when the
    send result reports the registration id as invalid.
    """
    from .fcm import fcm_send_single_device_data_message
    outcome = fcm_send_single_device_data_message(
        registration_id=str(self.registration_id),
        condition=condition,
        collapse_key=collapse_key,
        delay_while_idle=delay_while_idle,
        time_to_live=time_to_live,
        restricted_package_name=restricted_package_name,
        low_priority=low_priority,
        dry_run=dry_run,
        data_message=data_message,
        content_available=content_available,
        api_key=api_key,
        timeout=timeout,
        json_encoder=json_encoder,
    )
    self._deactivate_device_on_error_result(outcome)
    return outcome
self,
condition=None,
collapse_key=None,
delay_while_idle=False,
time_to_live=None,
restricted_package_name=None,
low_priority=False,
dry_run=False,
data_message=None,
content_available=None,
api_key=None,
timeout=5,
json_encoder=None):
from .fcm import fcm_send_single_device_data_message
result = fcm_send_single_device_data_message(
registration_id=str(self.registration_id),
condition=condition,
collapse_key=collapse_key,
delay_while_idle=delay_while_idle,
time_to_live=time_to_live,
restricted_package_name=restricted_package_name,
low_priority=low_priority,
dry_run=dry_run,
data_message=data_message,
content_available=content_available,
api_key=api_key,
timeout=timeout,
json_encoder=json_encoder,
)
self._deactivate_device_on_error_result(result)
return result | [
"def",
"send_data_message",
"(",
"self",
",",
"condition",
"=",
"None",
",",
"collapse_key",
"=",
"None",
",",
"delay_while_idle",
"=",
"False",
",",
"time_to_live",
"=",
"None",
",",
"restricted_package_name",
"=",
"None",
",",
"low_priority",
"=",
"False",
"... | Send single data message. | [
"Send",
"single",
"data",
"message",
"."
] | 8480d1cf935bfb28e2ad6d86a0abf923c2ecb266 | https://github.com/xtrinch/fcm-django/blob/8480d1cf935bfb28e2ad6d86a0abf923c2ecb266/fcm_django/models.py#L202-L237 |
def sma(arg, n):
    """Simple moving average of a Series or DataFrame.

    If n is 0, return the life-to-date (expanding) mean; otherwise return
    the n-period rolling mean (NaN until n observations are available).

    Fix: ``pd.rolling_mean``/``pd.expanding_mean`` were deprecated in
    pandas 0.18 and later removed; the equivalent window methods are used
    instead and produce identical results.
    """
    if n == 0:
        return arg.expanding().mean()
    return arg.rolling(n, min_periods=n).mean()
if n == 0:
return pd.expanding_mean(arg)
else:
return pd.rolling_mean(arg, n, min_periods=n) | [
"def",
"sma",
"(",
"arg",
",",
"n",
")",
":",
"if",
"n",
"==",
"0",
":",
"return",
"pd",
".",
"expanding_mean",
"(",
"arg",
")",
"else",
":",
"return",
"pd",
".",
"rolling_mean",
"(",
"arg",
",",
"n",
",",
"min_periods",
"=",
"n",
")"
] | If n is 0 then return the ltd mean; else return the n day mean | [
"If",
"n",
"is",
"0",
"then",
"return",
"the",
"ltd",
"mean",
";",
"else",
"return",
"the",
"n",
"day",
"mean"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/ta.py#L13-L18 |
def rsi(arg, n):
    """Compute the n-period RSI (Wilder) for the given arg.

    :param arg: Series or DataFrame of prices
    :param n: lookback period
    :return: same type as *arg*, reindexed to the original index
    """
    if isinstance(arg, pd.DataFrame):
        # Fix: DataFrame.from_items was deprecated and removed from
        # pandas; build the frame from a dict while preserving the
        # original column order explicitly.
        data = {name: rsi(arg[name], n) for name in arg.columns}
        return pd.DataFrame(data, columns=arg.columns)
    assert isinstance(arg, pd.Series)
    n = int(n)
    converted = arg.dropna()
    change = converted.diff()
    # Separate up-moves and down-moves, then smooth with Wilder's MA.
    gain = change.apply(lambda c: c > 0 and c or 0)
    avg_gain = wilderma(gain, n)
    loss = change.apply(lambda c: c < 0 and abs(c) or 0)
    avg_loss = wilderma(loss, n)
    rs = avg_gain / avg_loss
    rs[rs == np.inf] = 100.  # all gains, no losses: divide by zero
    result = 100. - (100. / (1. + rs))
    return pd.Series(result, index=converted.index).reindex(arg.index)
if isinstance(arg, pd.DataFrame):
cols = [(name, rsi(arg[name], n)) for name in arg.columns]
return pd.DataFrame.from_items(cols)
else:
assert isinstance(arg, pd.Series)
n = int(n)
converted = arg.dropna()
change = converted.diff()
gain = change.apply(lambda c: c > 0 and c or 0)
avgGain = wilderma(gain, n)
loss = change.apply(lambda c: c < 0 and abs(c) or 0)
avgLoss = wilderma(loss, n)
result = avgGain / avgLoss
result[result == np.inf] = 100. # divide by zero
result = 100. - (100. / (1. + result))
return pd.Series(result, index=converted.index).reindex(arg.index) | [
"def",
"rsi",
"(",
"arg",
",",
"n",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"pd",
".",
"DataFrame",
")",
":",
"cols",
"=",
"[",
"(",
"name",
",",
"rsi",
"(",
"arg",
"[",
"name",
"]",
",",
"n",
")",
")",
"for",
"name",
"in",
"arg",
".... | compute RSI for the given arg
arg: Series or DataFrame | [
"compute",
"RSI",
"for",
"the",
"given",
"arg"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/ta.py#L174-L195 |
def send_outlook_email(to, subject, body, attachments=None, cc=None, bcc=None, is_html=0):
    """Send an email through the local Outlook client (COM automation).

    :param to: a single address or a list of addresses
    :param subject: message subject line
    :param body: message body (plain text, or HTML when *is_html*)
    :param attachments: optional list of file paths to attach
    :param cc: a single address or a list of addresses
    :param bcc: a single address or a list of addresses
    :param is_html: when truthy, *body* is sent as HTML
    """
    import win32com.client
    asarr = lambda v: None if not v else isinstance(v, basestring) and [v] or v

    def update_recipients(robj, users, type):
        # Bug fix: this previously re-read `asarr(to)` instead of its own
        # `users` argument, so cc/bcc recipients were populated with the
        # `to` addresses.
        users = asarr(users)
        if users:
            for u in users:
                r = robj.Add(u)
                r.Type = type

    outlook = win32com.client.gencache.EnsureDispatch("Outlook.Application")
    mapi = outlook.GetNamespace("MAPI")
    constants = win32com.client.constants
    msg = outlook.CreateItem(0)
    # setup the recipients
    recipients = msg.Recipients
    to and update_recipients(recipients, to, constants.olTo)
    cc and update_recipients(recipients, cc, constants.olCC)
    bcc and update_recipients(recipients, bcc, constants.olBCC)
    recipients.ResolveAll()
    msg.Subject = subject
    if is_html:
        msg.BodyFormat = constants.olFormatHTML
        msg.HTMLBody = body
    else:
        msg.Body = body
    # Bug fix: map() is lazy on Python 3, so the attachments were never
    # actually added; use an explicit loop for the side effect.
    for fpath in attachments or []:
        msg.Attachments.Add(fpath)
    msg.Send()
import win32com.client
asarr = lambda v: None if not v else isinstance(v, basestring) and [v] or v
def update_recipients(robj, users, type):
users = asarr(to)
if users:
for u in users:
r = robj.Add(u)
r.Type = type
outlook = win32com.client.gencache.EnsureDispatch("Outlook.Application")
mapi = outlook.GetNamespace("MAPI")
constants = win32com.client.constants
msg = outlook.CreateItem(0)
# setup the recipients
recipients = msg.Recipients
to and update_recipients(recipients, to, constants.olTo)
cc and update_recipients(recipients, cc, constants.olCC)
bcc and update_recipients(recipients, bcc, constants.olBCC)
recipients.ResolveAll()
msg.Subject = subject
if is_html:
msg.BodyFormat = constants.olFormatHTML
msg.HTMLBody = body
else:
msg.Body = body
map(lambda fpath: msg.Attachments.Add(fpath), attachments or [])
msg.Send() | [
"def",
"send_outlook_email",
"(",
"to",
",",
"subject",
",",
"body",
",",
"attachments",
"=",
"None",
",",
"cc",
"=",
"None",
",",
"bcc",
"=",
"None",
",",
"is_html",
"=",
"0",
")",
":",
"import",
"win32com",
".",
"client",
"asarr",
"=",
"lambda",
"v... | Send an email using your local outlook client | [
"Send",
"an",
"email",
"using",
"your",
"local",
"outlook",
"client"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/util/windows.py#L7-L36 |
226,842 | bpsmith/tia | tia/util/windows.py | WinSCPBatch.add_uploads | def add_uploads(self, filemap):
"""Add the dict of uploads
Parameters
----------
filemap: dict, (remote_filename -> local_filename)
"""
[self.add_upload(k, v) for k, v in filemap.iteritems()] | python | def add_uploads(self, filemap):
[self.add_upload(k, v) for k, v in filemap.iteritems()] | [
"def",
"add_uploads",
"(",
"self",
",",
"filemap",
")",
":",
"[",
"self",
".",
"add_upload",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"filemap",
".",
"iteritems",
"(",
")",
"]"
] | Add the dict of uploads
Parameters
----------
filemap: dict, (remote_filename -> local_filename) | [
"Add",
"the",
"dict",
"of",
"uploads"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/util/windows.py#L71-L78 |
226,843 | bpsmith/tia | tia/analysis/model/ret.py | return_on_initial_capital | def return_on_initial_capital(capital, period_pl, leverage=None):
"""Return the daily return series based on the capital"""
if capital <= 0:
raise ValueError('cost must be a positive number not %s' % capital)
leverage = leverage or 1.
eod = capital + (leverage * period_pl.cumsum())
ltd_rets = (eod / capital) - 1.
dly_rets = ltd_rets
dly_rets.iloc[1:] = (1. + ltd_rets).pct_change().iloc[1:]
return dly_rets | python | def return_on_initial_capital(capital, period_pl, leverage=None):
if capital <= 0:
raise ValueError('cost must be a positive number not %s' % capital)
leverage = leverage or 1.
eod = capital + (leverage * period_pl.cumsum())
ltd_rets = (eod / capital) - 1.
dly_rets = ltd_rets
dly_rets.iloc[1:] = (1. + ltd_rets).pct_change().iloc[1:]
return dly_rets | [
"def",
"return_on_initial_capital",
"(",
"capital",
",",
"period_pl",
",",
"leverage",
"=",
"None",
")",
":",
"if",
"capital",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'cost must be a positive number not %s'",
"%",
"capital",
")",
"leverage",
"=",
"leverage",
... | Return the daily return series based on the capital | [
"Return",
"the",
"daily",
"return",
"series",
"based",
"on",
"the",
"capital"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/ret.py#L17-L26 |
226,844 | bpsmith/tia | tia/rlab/patch.py | _listWrapOn | def _listWrapOn(F, availWidth, canv, mergeSpace=1, obj=None, dims=None):
'''return max width, required height for a list of flowables F'''
doct = getattr(canv, '_doctemplate', None)
cframe = getattr(doct, 'frame', None)
if cframe:
from reportlab.platypus.doctemplate import _addGeneratedContent, Indenter
doct_frame = cframe
from copy import deepcopy
cframe = doct.frame = deepcopy(doct_frame)
cframe._generated_content = None
del cframe._generated_content
try:
W = 0
H = 0
pS = 0
atTop = 1
F = F[:]
while F:
f = F.pop(0)
if hasattr(f, 'frameAction'):
from reportlab.platypus.doctemplate import Indenter
if isinstance(f, Indenter):
availWidth -= f.left + f.right
continue
w, h = f.wrapOn(canv, availWidth, 0xfffffff)
if dims is not None: dims.append((w, h))
if cframe:
_addGeneratedContent(F, cframe)
if w <= fl._FUZZ or h <= fl._FUZZ: continue
#
# THE HACK
#
# W = max(W,min(availWidth, w))
W = max(W, w)
H += h
if not atTop:
h = f.getSpaceBefore()
if mergeSpace:
if getattr(f, '_SPACETRANSFER', False):
h = pS
h = max(h - pS, 0)
H += h
else:
if obj is not None: obj._spaceBefore = f.getSpaceBefore()
atTop = 0
s = f.getSpaceAfter()
if getattr(f, '_SPACETRANSFER', False):
s = pS
pS = s
H += pS
if obj is not None: obj._spaceAfter = pS
return W, H - pS
finally:
if cframe:
doct.frame = doct_frame | python | def _listWrapOn(F, availWidth, canv, mergeSpace=1, obj=None, dims=None):
'''return max width, required height for a list of flowables F'''
doct = getattr(canv, '_doctemplate', None)
cframe = getattr(doct, 'frame', None)
if cframe:
from reportlab.platypus.doctemplate import _addGeneratedContent, Indenter
doct_frame = cframe
from copy import deepcopy
cframe = doct.frame = deepcopy(doct_frame)
cframe._generated_content = None
del cframe._generated_content
try:
W = 0
H = 0
pS = 0
atTop = 1
F = F[:]
while F:
f = F.pop(0)
if hasattr(f, 'frameAction'):
from reportlab.platypus.doctemplate import Indenter
if isinstance(f, Indenter):
availWidth -= f.left + f.right
continue
w, h = f.wrapOn(canv, availWidth, 0xfffffff)
if dims is not None: dims.append((w, h))
if cframe:
_addGeneratedContent(F, cframe)
if w <= fl._FUZZ or h <= fl._FUZZ: continue
#
# THE HACK
#
# W = max(W,min(availWidth, w))
W = max(W, w)
H += h
if not atTop:
h = f.getSpaceBefore()
if mergeSpace:
if getattr(f, '_SPACETRANSFER', False):
h = pS
h = max(h - pS, 0)
H += h
else:
if obj is not None: obj._spaceBefore = f.getSpaceBefore()
atTop = 0
s = f.getSpaceAfter()
if getattr(f, '_SPACETRANSFER', False):
s = pS
pS = s
H += pS
if obj is not None: obj._spaceAfter = pS
return W, H - pS
finally:
if cframe:
doct.frame = doct_frame | [
"def",
"_listWrapOn",
"(",
"F",
",",
"availWidth",
",",
"canv",
",",
"mergeSpace",
"=",
"1",
",",
"obj",
"=",
"None",
",",
"dims",
"=",
"None",
")",
":",
"doct",
"=",
"getattr",
"(",
"canv",
",",
"'_doctemplate'",
",",
"None",
")",
"cframe",
"=",
"... | return max width, required height for a list of flowables F | [
"return",
"max",
"width",
"required",
"height",
"for",
"a",
"list",
"of",
"flowables",
"F"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/patch.py#L8-L65 |
226,845 | bpsmith/tia | tia/analysis/model/pos.py | Positions.plot_rets | def plot_rets(self, ls=1, ax=None):
"""Plot each of the position returns
:param ls: True, if positions should be broken into long/short
:param ax: Axes
:param regr: True, if regression line is shown
"""
import matplotlib.pyplot as plt
from tia.util.mplot import AxesFormat
if ax is None:
ax = plt.gca()
frame = self.frame
if not ls:
ax.scatter(frame.index, frame.ret, c='k', marker='o', label='All')
else:
if len(self.long_pids) > 0:
lframe = frame.ix[frame.index.isin(self.long_pids)]
ax.scatter(lframe.index, lframe.ret, c='k', marker='o', label='Long')
if len(self.short_pids) > 0:
sframe = frame.ix[frame.index.isin(self.short_pids)]
ax.scatter(sframe.index, sframe.ret, c='r', marker='o', label='Short')
# set some boundaries
AxesFormat().Y.percent().apply()
ax.set_xlim(0, frame.index.max() + 3)
ax.set_xlabel('pid')
ax.set_ylabel('return')
ax.legend(loc='upper left')
return ax | python | def plot_rets(self, ls=1, ax=None):
import matplotlib.pyplot as plt
from tia.util.mplot import AxesFormat
if ax is None:
ax = plt.gca()
frame = self.frame
if not ls:
ax.scatter(frame.index, frame.ret, c='k', marker='o', label='All')
else:
if len(self.long_pids) > 0:
lframe = frame.ix[frame.index.isin(self.long_pids)]
ax.scatter(lframe.index, lframe.ret, c='k', marker='o', label='Long')
if len(self.short_pids) > 0:
sframe = frame.ix[frame.index.isin(self.short_pids)]
ax.scatter(sframe.index, sframe.ret, c='r', marker='o', label='Short')
# set some boundaries
AxesFormat().Y.percent().apply()
ax.set_xlim(0, frame.index.max() + 3)
ax.set_xlabel('pid')
ax.set_ylabel('return')
ax.legend(loc='upper left')
return ax | [
"def",
"plot_rets",
"(",
"self",
",",
"ls",
"=",
"1",
",",
"ax",
"=",
"None",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"from",
"tia",
".",
"util",
".",
"mplot",
"import",
"AxesFormat",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
... | Plot each of the position returns
:param ls: True, if positions should be broken into long/short
:param ax: Axes
:param regr: True, if regression line is shown | [
"Plot",
"each",
"of",
"the",
"position",
"returns"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/pos.py#L138-L169 |
226,846 | bpsmith/tia | tia/analysis/model/pos.py | Positions.plot_ret_range | def plot_ret_range(self, ax=None, ls=0, dur=0):
"""Plot the return range for each position
:param ax: Axes
"""
import matplotlib.pyplot as plt
from tia.util.mplot import AxesFormat
if ax is None:
ax = plt.gca()
frame = self.frame
pids = frame.index
min_rets = pd.Series([self[pid].performance.ltd_txn.min() for pid in pids], index=pids)
max_rets = pd.Series([self[pid].performance.ltd_txn.max() for pid in pids], index=pids)
if not ls:
s = frame.duration + 20 if dur else 20
ax.scatter(frame.index, frame.ret, s=s, c='k', marker='o', label='All')
ax.vlines(pids, min_rets, max_rets)
else:
if len(self.long_pids) > 0:
lframe = frame.ix[frame.index.isin(self.long_pids)]
s = lframe.duration + 20 if dur else 20
ax.scatter(lframe.index, lframe.ret, s=s, c='k', marker='o', label='Long')
ax.vlines(lframe.index, min_rets[lframe.index], max_rets[frame.index])
if len(self.short_pids) > 0:
sframe = frame.ix[frame.index.isin(self.short_pids)]
s = sframe.duration + 20 if dur else 20
ax.scatter(sframe.index, sframe.ret, s=s, c='r', marker='o', label='Short')
ax.vlines(sframe.index, min_rets[sframe.index], max_rets[sframe.index])
AxesFormat().Y.percent().apply()
ax.axhline(color='k', linestyle='--')
ax.set_xlim(0, frame.index.max() + 3)
ax.set_xlabel('pid')
ax.set_ylabel('return')
ax.legend(loc='upper left')
return ax | python | def plot_ret_range(self, ax=None, ls=0, dur=0):
import matplotlib.pyplot as plt
from tia.util.mplot import AxesFormat
if ax is None:
ax = plt.gca()
frame = self.frame
pids = frame.index
min_rets = pd.Series([self[pid].performance.ltd_txn.min() for pid in pids], index=pids)
max_rets = pd.Series([self[pid].performance.ltd_txn.max() for pid in pids], index=pids)
if not ls:
s = frame.duration + 20 if dur else 20
ax.scatter(frame.index, frame.ret, s=s, c='k', marker='o', label='All')
ax.vlines(pids, min_rets, max_rets)
else:
if len(self.long_pids) > 0:
lframe = frame.ix[frame.index.isin(self.long_pids)]
s = lframe.duration + 20 if dur else 20
ax.scatter(lframe.index, lframe.ret, s=s, c='k', marker='o', label='Long')
ax.vlines(lframe.index, min_rets[lframe.index], max_rets[frame.index])
if len(self.short_pids) > 0:
sframe = frame.ix[frame.index.isin(self.short_pids)]
s = sframe.duration + 20 if dur else 20
ax.scatter(sframe.index, sframe.ret, s=s, c='r', marker='o', label='Short')
ax.vlines(sframe.index, min_rets[sframe.index], max_rets[sframe.index])
AxesFormat().Y.percent().apply()
ax.axhline(color='k', linestyle='--')
ax.set_xlim(0, frame.index.max() + 3)
ax.set_xlabel('pid')
ax.set_ylabel('return')
ax.legend(loc='upper left')
return ax | [
"def",
"plot_ret_range",
"(",
"self",
",",
"ax",
"=",
"None",
",",
"ls",
"=",
"0",
",",
"dur",
"=",
"0",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"from",
"tia",
".",
"util",
".",
"mplot",
"import",
"AxesFormat",
"if",
"ax",
"is... | Plot the return range for each position
:param ax: Axes | [
"Plot",
"the",
"return",
"range",
"for",
"each",
"position"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/pos.py#L171-L210 |
226,847 | bpsmith/tia | tia/analysis/model/pos.py | PositionsStats.consecutive_frame | def consecutive_frame(self):
"""Return a DataFrame with columns cnt, pids, pl. cnt is the number of pids in the sequence. pl is the pl sum"""
if self._frame.empty:
return pd.DataFrame(columns=['pids', 'pl', 'cnt', 'is_win'])
else:
vals = (self._frame[PC.RET] >= 0).astype(int)
seq = (vals.shift(1) != vals).astype(int).cumsum()
def _do_apply(sub):
return pd.Series({
'pids': sub.index.values,
'pl': sub[PC.PL].sum(),
'cnt': len(sub.index),
'is_win': sub[PC.RET].iloc[0] >= 0,
})
return self._frame.groupby(seq).apply(_do_apply) | python | def consecutive_frame(self):
if self._frame.empty:
return pd.DataFrame(columns=['pids', 'pl', 'cnt', 'is_win'])
else:
vals = (self._frame[PC.RET] >= 0).astype(int)
seq = (vals.shift(1) != vals).astype(int).cumsum()
def _do_apply(sub):
return pd.Series({
'pids': sub.index.values,
'pl': sub[PC.PL].sum(),
'cnt': len(sub.index),
'is_win': sub[PC.RET].iloc[0] >= 0,
})
return self._frame.groupby(seq).apply(_do_apply) | [
"def",
"consecutive_frame",
"(",
"self",
")",
":",
"if",
"self",
".",
"_frame",
".",
"empty",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"[",
"'pids'",
",",
"'pl'",
",",
"'cnt'",
",",
"'is_win'",
"]",
")",
"else",
":",
"vals",
"=",
... | Return a DataFrame with columns cnt, pids, pl. cnt is the number of pids in the sequence. pl is the pl sum | [
"Return",
"a",
"DataFrame",
"with",
"columns",
"cnt",
"pids",
"pl",
".",
"cnt",
"is",
"the",
"number",
"of",
"pids",
"in",
"the",
"sequence",
".",
"pl",
"is",
"the",
"pl",
"sum"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/pos.py#L258-L274 |
226,848 | bpsmith/tia | tia/analysis/model/pl.py | ProfitAndLossDetails.asfreq | def asfreq(self, freq):
"""Resample the p&l at the specified frequency
:param freq:
:return: Pl object
"""
frame = self.frame
if freq == 'B':
resampled = frame.groupby(frame.index.date).apply(lambda f: f.sum())
resampled.index = pd.DatetimeIndex([i for i in resampled.index])
return ProfitAndLossDetails(resampled)
else:
resampled = frame.resample(freq, how='sum')
return ProfitAndLossDetails(resampled) | python | def asfreq(self, freq):
frame = self.frame
if freq == 'B':
resampled = frame.groupby(frame.index.date).apply(lambda f: f.sum())
resampled.index = pd.DatetimeIndex([i for i in resampled.index])
return ProfitAndLossDetails(resampled)
else:
resampled = frame.resample(freq, how='sum')
return ProfitAndLossDetails(resampled) | [
"def",
"asfreq",
"(",
"self",
",",
"freq",
")",
":",
"frame",
"=",
"self",
".",
"frame",
"if",
"freq",
"==",
"'B'",
":",
"resampled",
"=",
"frame",
".",
"groupby",
"(",
"frame",
".",
"index",
".",
"date",
")",
".",
"apply",
"(",
"lambda",
"f",
":... | Resample the p&l at the specified frequency
:param freq:
:return: Pl object | [
"Resample",
"the",
"p&l",
"at",
"the",
"specified",
"frequency"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/pl.py#L258-L271 |
226,849 | bpsmith/tia | tia/util/mplot.py | GridHelper.get_axes | def get_axes(self, idx):
""" Allow for simple indexing """
cidx = 0
if idx > 0:
cidx = idx % self.ncols
ridx = idx / self.ncols
return self.axarr[ridx][cidx] | python | def get_axes(self, idx):
cidx = 0
if idx > 0:
cidx = idx % self.ncols
ridx = idx / self.ncols
return self.axarr[ridx][cidx] | [
"def",
"get_axes",
"(",
"self",
",",
"idx",
")",
":",
"cidx",
"=",
"0",
"if",
"idx",
">",
"0",
":",
"cidx",
"=",
"idx",
"%",
"self",
".",
"ncols",
"ridx",
"=",
"idx",
"/",
"self",
".",
"ncols",
"return",
"self",
".",
"axarr",
"[",
"ridx",
"]",
... | Allow for simple indexing | [
"Allow",
"for",
"simple",
"indexing"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/util/mplot.py#L226-L232 |
226,850 | bpsmith/tia | tia/analysis/perf.py | periodicity | def periodicity(freq_or_frame):
"""
resolve the number of periods per year
"""
if hasattr(freq_or_frame, 'rule_code'):
rc = freq_or_frame.rule_code
rc = rc.split('-')[0]
factor = PER_YEAR_MAP.get(rc, None)
if factor is not None:
return factor / abs(freq_or_frame.n)
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, basestring):
factor = PER_YEAR_MAP.get(freq_or_frame, None)
if factor is not None:
return factor
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, (pd.Series, pd.DataFrame, pd.TimeSeries)):
freq = freq_or_frame.index.freq
if not freq:
freq = pd.infer_freq(freq_or_frame.index)
if freq:
return periodicity(freq)
else:
# Attempt to resolve it
import warnings
freq = guess_freq(freq_or_frame.index)
warnings.warn('frequency not set. guessed it to be %s' % freq)
return periodicity(freq)
else:
return periodicity(freq)
else:
raise ValueError("periodicity expects DataFrame, Series, or rule_code property") | python | def periodicity(freq_or_frame):
if hasattr(freq_or_frame, 'rule_code'):
rc = freq_or_frame.rule_code
rc = rc.split('-')[0]
factor = PER_YEAR_MAP.get(rc, None)
if factor is not None:
return factor / abs(freq_or_frame.n)
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, basestring):
factor = PER_YEAR_MAP.get(freq_or_frame, None)
if factor is not None:
return factor
else:
raise Exception('Failed to determine periodicity. No factor mapping for %s' % freq_or_frame)
elif isinstance(freq_or_frame, (pd.Series, pd.DataFrame, pd.TimeSeries)):
freq = freq_or_frame.index.freq
if not freq:
freq = pd.infer_freq(freq_or_frame.index)
if freq:
return periodicity(freq)
else:
# Attempt to resolve it
import warnings
freq = guess_freq(freq_or_frame.index)
warnings.warn('frequency not set. guessed it to be %s' % freq)
return periodicity(freq)
else:
return periodicity(freq)
else:
raise ValueError("periodicity expects DataFrame, Series, or rule_code property") | [
"def",
"periodicity",
"(",
"freq_or_frame",
")",
":",
"if",
"hasattr",
"(",
"freq_or_frame",
",",
"'rule_code'",
")",
":",
"rc",
"=",
"freq_or_frame",
".",
"rule_code",
"rc",
"=",
"rc",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"factor",
"=",
"PER_... | resolve the number of periods per year | [
"resolve",
"the",
"number",
"of",
"periods",
"per",
"year"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/perf.py#L64-L98 |
226,851 | bpsmith/tia | tia/analysis/perf.py | _resolve_periods_in_year | def _resolve_periods_in_year(scale, frame):
""" Convert the scale to an annualzation factor. If scale is None then attempt to resolve from frame. If scale is a scalar then
use it. If scale is a string then use it to lookup the annual factor
"""
if scale is None:
return periodicity(frame)
elif isinstance(scale, basestring):
return periodicity(scale)
elif np.isscalar(scale):
return scale
else:
raise ValueError("scale must be None, scalar, or string, not %s" % type(scale)) | python | def _resolve_periods_in_year(scale, frame):
if scale is None:
return periodicity(frame)
elif isinstance(scale, basestring):
return periodicity(scale)
elif np.isscalar(scale):
return scale
else:
raise ValueError("scale must be None, scalar, or string, not %s" % type(scale)) | [
"def",
"_resolve_periods_in_year",
"(",
"scale",
",",
"frame",
")",
":",
"if",
"scale",
"is",
"None",
":",
"return",
"periodicity",
"(",
"frame",
")",
"elif",
"isinstance",
"(",
"scale",
",",
"basestring",
")",
":",
"return",
"periodicity",
"(",
"scale",
"... | Convert the scale to an annualzation factor. If scale is None then attempt to resolve from frame. If scale is a scalar then
use it. If scale is a string then use it to lookup the annual factor | [
"Convert",
"the",
"scale",
"to",
"an",
"annualzation",
"factor",
".",
"If",
"scale",
"is",
"None",
"then",
"attempt",
"to",
"resolve",
"from",
"frame",
".",
"If",
"scale",
"is",
"a",
"scalar",
"then",
"use",
"it",
".",
"If",
"scale",
"is",
"a",
"string... | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/perf.py#L104-L115 |
226,852 | bpsmith/tia | tia/analysis/perf.py | returns_cumulative | def returns_cumulative(returns, geometric=True, expanding=False):
""" return the cumulative return
Parameters
----------
returns : DataFrame or Series
geometric : bool, default is True
If True, geometrically link returns
expanding : bool default is False
If True, return expanding series/frame of returns
If False, return the final value(s)
"""
if expanding:
if geometric:
return (1. + returns).cumprod() - 1.
else:
return returns.cumsum()
else:
if geometric:
return (1. + returns).prod() - 1.
else:
return returns.sum() | python | def returns_cumulative(returns, geometric=True, expanding=False):
if expanding:
if geometric:
return (1. + returns).cumprod() - 1.
else:
return returns.cumsum()
else:
if geometric:
return (1. + returns).prod() - 1.
else:
return returns.sum() | [
"def",
"returns_cumulative",
"(",
"returns",
",",
"geometric",
"=",
"True",
",",
"expanding",
"=",
"False",
")",
":",
"if",
"expanding",
":",
"if",
"geometric",
":",
"return",
"(",
"1.",
"+",
"returns",
")",
".",
"cumprod",
"(",
")",
"-",
"1.",
"else",... | return the cumulative return
Parameters
----------
returns : DataFrame or Series
geometric : bool, default is True
If True, geometrically link returns
expanding : bool default is False
If True, return expanding series/frame of returns
If False, return the final value(s) | [
"return",
"the",
"cumulative",
"return"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/perf.py#L156-L177 |
226,853 | bpsmith/tia | tia/analysis/perf.py | rolling_returns_cumulative | def rolling_returns_cumulative(returns, window, min_periods=1, geometric=True):
""" return the rolling cumulative returns
Parameters
----------
returns : DataFrame or Series
window : number of observations
min_periods : minimum number of observations in a window
geometric : link the returns geometrically
"""
if geometric:
rc = lambda x: (1. + x[np.isfinite(x)]).prod() - 1.
else:
rc = lambda x: (x[np.isfinite(x)]).sum()
return pd.rolling_apply(returns, window, rc, min_periods=min_periods) | python | def rolling_returns_cumulative(returns, window, min_periods=1, geometric=True):
if geometric:
rc = lambda x: (1. + x[np.isfinite(x)]).prod() - 1.
else:
rc = lambda x: (x[np.isfinite(x)]).sum()
return pd.rolling_apply(returns, window, rc, min_periods=min_periods) | [
"def",
"rolling_returns_cumulative",
"(",
"returns",
",",
"window",
",",
"min_periods",
"=",
"1",
",",
"geometric",
"=",
"True",
")",
":",
"if",
"geometric",
":",
"rc",
"=",
"lambda",
"x",
":",
"(",
"1.",
"+",
"x",
"[",
"np",
".",
"isfinite",
"(",
"x... | return the rolling cumulative returns
Parameters
----------
returns : DataFrame or Series
window : number of observations
min_periods : minimum number of observations in a window
geometric : link the returns geometrically | [
"return",
"the",
"rolling",
"cumulative",
"returns"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/perf.py#L180-L195 |
226,854 | bpsmith/tia | tia/analysis/perf.py | returns_annualized | def returns_annualized(returns, geometric=True, scale=None, expanding=False):
""" return the annualized cumulative returns
Parameters
----------
returns : DataFrame or Series
geometric : link the returns geometrically
scale: None or scalar or string (ie 12 for months in year),
If None, attempt to resolve from returns
If scalar, then use this as the annualization factor
If string, then pass this to periodicity function to resolve annualization factor
expanding: bool, default is False
If True, return expanding series/frames.
If False, return final result.
"""
scale = _resolve_periods_in_year(scale, returns)
if expanding:
if geometric:
n = pd.expanding_count(returns)
return ((1. + returns).cumprod() ** (scale / n)) - 1.
else:
return pd.expanding_mean(returns) * scale
else:
if geometric:
n = returns.count()
return ((1. + returns).prod() ** (scale / n)) - 1.
else:
return returns.mean() * scale | python | def returns_annualized(returns, geometric=True, scale=None, expanding=False):
scale = _resolve_periods_in_year(scale, returns)
if expanding:
if geometric:
n = pd.expanding_count(returns)
return ((1. + returns).cumprod() ** (scale / n)) - 1.
else:
return pd.expanding_mean(returns) * scale
else:
if geometric:
n = returns.count()
return ((1. + returns).prod() ** (scale / n)) - 1.
else:
return returns.mean() * scale | [
"def",
"returns_annualized",
"(",
"returns",
",",
"geometric",
"=",
"True",
",",
"scale",
"=",
"None",
",",
"expanding",
"=",
"False",
")",
":",
"scale",
"=",
"_resolve_periods_in_year",
"(",
"scale",
",",
"returns",
")",
"if",
"expanding",
":",
"if",
"geo... | return the annualized cumulative returns
Parameters
----------
returns : DataFrame or Series
geometric : link the returns geometrically
scale: None or scalar or string (ie 12 for months in year),
If None, attempt to resolve from returns
If scalar, then use this as the annualization factor
If string, then pass this to periodicity function to resolve annualization factor
expanding: bool, default is False
If True, return expanding series/frames.
If False, return final result. | [
"return",
"the",
"annualized",
"cumulative",
"returns"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/perf.py#L198-L225 |
226,855 | bpsmith/tia | tia/analysis/perf.py | information_ratio | def information_ratio(rets, bm_rets, scale=None, expanding=False):
"""Information ratio, a common measure of manager efficiency, evaluates excess returns over a benchmark
versus tracking error.
:param rets: period returns
:param bm_rets: periodic benchmark returns (not annualized)
:param scale: None or the scale to be used for annualization
:param expanding:
:return:
"""
scale = _resolve_periods_in_year(scale, rets)
rets_ann = returns_annualized(rets, scale=scale, expanding=expanding)
bm_rets_ann = returns_annualized(rets, scale=scale, expanding=expanding)
tracking_error_ann = std_annualized((rets - bm_rets), scale=scale, expanding=expanding)
return (rets_ann - bm_rets_ann) / tracking_error_ann | python | def information_ratio(rets, bm_rets, scale=None, expanding=False):
scale = _resolve_periods_in_year(scale, rets)
rets_ann = returns_annualized(rets, scale=scale, expanding=expanding)
bm_rets_ann = returns_annualized(rets, scale=scale, expanding=expanding)
tracking_error_ann = std_annualized((rets - bm_rets), scale=scale, expanding=expanding)
return (rets_ann - bm_rets_ann) / tracking_error_ann | [
"def",
"information_ratio",
"(",
"rets",
",",
"bm_rets",
",",
"scale",
"=",
"None",
",",
"expanding",
"=",
"False",
")",
":",
"scale",
"=",
"_resolve_periods_in_year",
"(",
"scale",
",",
"rets",
")",
"rets_ann",
"=",
"returns_annualized",
"(",
"rets",
",",
... | Information ratio, a common measure of manager efficiency, evaluates excess returns over a benchmark
versus tracking error.
:param rets: period returns
:param bm_rets: periodic benchmark returns (not annualized)
:param scale: None or the scale to be used for annualization
:param expanding:
:return: | [
"Information",
"ratio",
"a",
"common",
"measure",
"of",
"manager",
"efficiency",
"evaluates",
"excess",
"returns",
"over",
"a",
"benchmark",
"versus",
"tracking",
"error",
"."
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/perf.py#L376-L390 |
226,856 | bpsmith/tia | tia/analysis/perf.py | rolling_percentileofscore | def rolling_percentileofscore(series, window, min_periods=None):
"""Computue the score percentile for the specified window."""
import scipy.stats as stats
def _percentile(arr):
score = arr[-1]
vals = arr[:-1]
return stats.percentileofscore(vals, score)
notnull = series.dropna()
min_periods = min_periods or window
if notnull.empty:
return pd.Series(np.nan, index=series.index)
else:
return pd.rolling_apply(notnull, window, _percentile, min_periods=min_periods).reindex(series.index) | python | def rolling_percentileofscore(series, window, min_periods=None):
import scipy.stats as stats
def _percentile(arr):
score = arr[-1]
vals = arr[:-1]
return stats.percentileofscore(vals, score)
notnull = series.dropna()
min_periods = min_periods or window
if notnull.empty:
return pd.Series(np.nan, index=series.index)
else:
return pd.rolling_apply(notnull, window, _percentile, min_periods=min_periods).reindex(series.index) | [
"def",
"rolling_percentileofscore",
"(",
"series",
",",
"window",
",",
"min_periods",
"=",
"None",
")",
":",
"import",
"scipy",
".",
"stats",
"as",
"stats",
"def",
"_percentile",
"(",
"arr",
")",
":",
"score",
"=",
"arr",
"[",
"-",
"1",
"]",
"vals",
"=... | Computue the score percentile for the specified window. | [
"Computue",
"the",
"score",
"percentile",
"for",
"the",
"specified",
"window",
"."
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/perf.py#L416-L430 |
226,857 | bpsmith/tia | tia/rlab/builder.py | PdfBuilder.new_title_bar | def new_title_bar(self, title, color=None):
"""Return an array of Pdf Objects which constitute a Header"""
# Build a title bar for top of page
w, t, c = '100%', 2, color or HexColor('#404040')
title = '<b>{0}</b>'.format(title)
if 'TitleBar' not in self.stylesheet:
tb = ParagraphStyle('TitleBar', parent=self.stylesheet['Normal'], fontName='Helvetica-Bold', fontSize=10,
leading=10, alignment=TA_CENTER)
self.stylesheet.add(tb)
return [HRFlowable(width=w, thickness=t, color=c, spaceAfter=2, vAlign='MIDDLE', lineCap='square'),
self.new_paragraph(title, 'TitleBar'),
HRFlowable(width=w, thickness=t, color=c, spaceBefore=2, vAlign='MIDDLE', lineCap='square')] | python | def new_title_bar(self, title, color=None):
# Build a title bar for top of page
w, t, c = '100%', 2, color or HexColor('#404040')
title = '<b>{0}</b>'.format(title)
if 'TitleBar' not in self.stylesheet:
tb = ParagraphStyle('TitleBar', parent=self.stylesheet['Normal'], fontName='Helvetica-Bold', fontSize=10,
leading=10, alignment=TA_CENTER)
self.stylesheet.add(tb)
return [HRFlowable(width=w, thickness=t, color=c, spaceAfter=2, vAlign='MIDDLE', lineCap='square'),
self.new_paragraph(title, 'TitleBar'),
HRFlowable(width=w, thickness=t, color=c, spaceBefore=2, vAlign='MIDDLE', lineCap='square')] | [
"def",
"new_title_bar",
"(",
"self",
",",
"title",
",",
"color",
"=",
"None",
")",
":",
"# Build a title bar for top of page",
"w",
",",
"t",
",",
"c",
"=",
"'100%'",
",",
"2",
",",
"color",
"or",
"HexColor",
"(",
"'#404040'",
")",
"title",
"=",
"'<b>{0}... | Return an array of Pdf Objects which constitute a Header | [
"Return",
"an",
"array",
"of",
"Pdf",
"Objects",
"which",
"constitute",
"a",
"Header"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/builder.py#L174-L185 |
226,858 | bpsmith/tia | tia/rlab/builder.py | PdfBuilder.build_page | def build_page(self, template_id, flowable_map):
"""Build a pdf page by looking up the specified template and then mapping the flowable_map items to the
appropriate named Frame
"""
pt = self.get_page_template(template_id)
# If this is the first page then ensure the page template is ordered first and no breaks or changes
# are requested otherwise blank page shows up
if self.active_template_id is None:
self.make_template_first(template_id)
self.story.append(NextPageTemplate(template_id))
self.inc_cover and self.story.append(PageBreak())
self.active_template_id = template_id
elif self.active_template_id == template_id:
# TODO - understand why this is necessary to not get a blank page between pages
self.story.append(PageBreak())
else:
self.story.append(NextPageTemplate(template_id))
self.story.append(PageBreak())
self.active_template_id = template_id
for idx, frame in enumerate(pt.frames):
if frame.id not in flowable_map:
# Add a note to the template to show that nothing was defined for this area
self.story.append(Paragraph('NOT DEFINED: %s' % frame.id, getSampleStyleSheet()['Normal']))
else:
flowables = flowable_map[frame.id]
if not isinstance(flowables, Flowable) and hasattr(flowables, '__iter__'):
[self.story.append(f) for f in flowables]
else:
self.story.append(flowables)
if idx < (len(pt.frames) - 1):
self.story.append(FrameBreak())
return self | python | def build_page(self, template_id, flowable_map):
pt = self.get_page_template(template_id)
# If this is the first page then ensure the page template is ordered first and no breaks or changes
# are requested otherwise blank page shows up
if self.active_template_id is None:
self.make_template_first(template_id)
self.story.append(NextPageTemplate(template_id))
self.inc_cover and self.story.append(PageBreak())
self.active_template_id = template_id
elif self.active_template_id == template_id:
# TODO - understand why this is necessary to not get a blank page between pages
self.story.append(PageBreak())
else:
self.story.append(NextPageTemplate(template_id))
self.story.append(PageBreak())
self.active_template_id = template_id
for idx, frame in enumerate(pt.frames):
if frame.id not in flowable_map:
# Add a note to the template to show that nothing was defined for this area
self.story.append(Paragraph('NOT DEFINED: %s' % frame.id, getSampleStyleSheet()['Normal']))
else:
flowables = flowable_map[frame.id]
if not isinstance(flowables, Flowable) and hasattr(flowables, '__iter__'):
[self.story.append(f) for f in flowables]
else:
self.story.append(flowables)
if idx < (len(pt.frames) - 1):
self.story.append(FrameBreak())
return self | [
"def",
"build_page",
"(",
"self",
",",
"template_id",
",",
"flowable_map",
")",
":",
"pt",
"=",
"self",
".",
"get_page_template",
"(",
"template_id",
")",
"# If this is the first page then ensure the page template is ordered first and no breaks or changes",
"# are requested oth... | Build a pdf page by looking up the specified template and then mapping the flowable_map items to the
appropriate named Frame | [
"Build",
"a",
"pdf",
"page",
"by",
"looking",
"up",
"the",
"specified",
"template",
"and",
"then",
"mapping",
"the",
"flowable_map",
"items",
"to",
"the",
"appropriate",
"named",
"Frame"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/builder.py#L220-L252 |
226,859 | bpsmith/tia | tia/rlab/builder.py | PdfBuilder.table_formatter | def table_formatter(self, dataframe, inc_header=1, inc_index=1):
"""Return a table formatter for the dataframe. Saves the user the need to import this class"""
return TableFormatter(dataframe, inc_header=inc_header, inc_index=inc_index) | python | def table_formatter(self, dataframe, inc_header=1, inc_index=1):
return TableFormatter(dataframe, inc_header=inc_header, inc_index=inc_index) | [
"def",
"table_formatter",
"(",
"self",
",",
"dataframe",
",",
"inc_header",
"=",
"1",
",",
"inc_index",
"=",
"1",
")",
":",
"return",
"TableFormatter",
"(",
"dataframe",
",",
"inc_header",
"=",
"inc_header",
",",
"inc_index",
"=",
"inc_index",
")"
] | Return a table formatter for the dataframe. Saves the user the need to import this class | [
"Return",
"a",
"table",
"formatter",
"for",
"the",
"dataframe",
".",
"Saves",
"the",
"user",
"the",
"need",
"to",
"import",
"this",
"class"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/builder.py#L262-L264 |
226,860 | bpsmith/tia | tia/analysis/model/port.py | PortfolioPricer.get_mkt_val | def get_mkt_val(self, pxs=None):
""" return the market value series for the specified Series of pxs """
pxs = self._closing_pxs if pxs is None else pxs
return pxs * self.multiplier | python | def get_mkt_val(self, pxs=None):
pxs = self._closing_pxs if pxs is None else pxs
return pxs * self.multiplier | [
"def",
"get_mkt_val",
"(",
"self",
",",
"pxs",
"=",
"None",
")",
":",
"pxs",
"=",
"self",
".",
"_closing_pxs",
"if",
"pxs",
"is",
"None",
"else",
"pxs",
"return",
"pxs",
"*",
"self",
".",
"multiplier"
] | return the market value series for the specified Series of pxs | [
"return",
"the",
"market",
"value",
"series",
"for",
"the",
"specified",
"Series",
"of",
"pxs"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/port.py#L41-L44 |
226,861 | bpsmith/tia | tia/analysis/model/ins.py | InstrumentPrices.volatility | def volatility(self, n, freq=None, which='close', ann=True, model='ln', min_periods=1, rolling='simple'):
"""Return the annualized volatility series. N is the number of lookback periods.
:param n: int, number of lookback periods
:param freq: resample frequency or None
:param which: price series to use
:param ann: If True then annualize
:param model: {'ln', 'pct', 'bbg'}
ln - use logarithmic price changes
pct - use pct price changes
bbg - use logarithmic price changes but Bloomberg uses actual business days
:param rolling:{'simple', 'exp'}, if exp, use ewmstd. if simple, use rolling_std
:return:
"""
if model not in ('bbg', 'ln', 'pct'):
raise ValueError('model must be one of (bbg, ln, pct), not %s' % model)
if rolling not in ('simple', 'exp'):
raise ValueError('rolling must be one of (simple, exp), not %s' % rolling)
px = self.frame[which]
px = px if not freq else px.resample(freq, how='last')
if model == 'bbg' and periods_in_year(px) == 252:
# Bloomberg uses business days, so need to convert and reindex
orig = px.index
px = px.resample('B').ffill()
chg = np.log(px / px.shift(1))
chg[chg.index - orig] = np.nan
if rolling == 'simple':
vol = pd.rolling_std(chg, n, min_periods=min_periods).reindex(orig)
else:
vol = pd.ewmstd(chg, span=n, min_periods=n)
return vol if not ann else vol * np.sqrt(260)
else:
chg = px.pct_change() if model == 'pct' else np.log(px / px.shift(1))
if rolling == 'simple':
vol = pd.rolling_std(chg, n, min_periods=min_periods)
else:
vol = pd.ewmstd(chg, span=n, min_periods=n)
return vol if not ann else vol * np.sqrt(periods_in_year(vol)) | python | def volatility(self, n, freq=None, which='close', ann=True, model='ln', min_periods=1, rolling='simple'):
if model not in ('bbg', 'ln', 'pct'):
raise ValueError('model must be one of (bbg, ln, pct), not %s' % model)
if rolling not in ('simple', 'exp'):
raise ValueError('rolling must be one of (simple, exp), not %s' % rolling)
px = self.frame[which]
px = px if not freq else px.resample(freq, how='last')
if model == 'bbg' and periods_in_year(px) == 252:
# Bloomberg uses business days, so need to convert and reindex
orig = px.index
px = px.resample('B').ffill()
chg = np.log(px / px.shift(1))
chg[chg.index - orig] = np.nan
if rolling == 'simple':
vol = pd.rolling_std(chg, n, min_periods=min_periods).reindex(orig)
else:
vol = pd.ewmstd(chg, span=n, min_periods=n)
return vol if not ann else vol * np.sqrt(260)
else:
chg = px.pct_change() if model == 'pct' else np.log(px / px.shift(1))
if rolling == 'simple':
vol = pd.rolling_std(chg, n, min_periods=min_periods)
else:
vol = pd.ewmstd(chg, span=n, min_periods=n)
return vol if not ann else vol * np.sqrt(periods_in_year(vol)) | [
"def",
"volatility",
"(",
"self",
",",
"n",
",",
"freq",
"=",
"None",
",",
"which",
"=",
"'close'",
",",
"ann",
"=",
"True",
",",
"model",
"=",
"'ln'",
",",
"min_periods",
"=",
"1",
",",
"rolling",
"=",
"'simple'",
")",
":",
"if",
"model",
"not",
... | Return the annualized volatility series. N is the number of lookback periods.
:param n: int, number of lookback periods
:param freq: resample frequency or None
:param which: price series to use
:param ann: If True then annualize
:param model: {'ln', 'pct', 'bbg'}
ln - use logarithmic price changes
pct - use pct price changes
bbg - use logarithmic price changes but Bloomberg uses actual business days
:param rolling:{'simple', 'exp'}, if exp, use ewmstd. if simple, use rolling_std
:return: | [
"Return",
"the",
"annualized",
"volatility",
"series",
".",
"N",
"is",
"the",
"number",
"of",
"lookback",
"periods",
"."
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/ins.py#L36-L74 |
226,862 | bpsmith/tia | tia/analysis/model/ins.py | Instrument.get_mkt_val | def get_mkt_val(self, pxs=None):
"""Return the market value series for the series of pxs"""
pxs = pxs if pxs is not None else self.pxs.close
return pxs * self.multiplier | python | def get_mkt_val(self, pxs=None):
pxs = pxs if pxs is not None else self.pxs.close
return pxs * self.multiplier | [
"def",
"get_mkt_val",
"(",
"self",
",",
"pxs",
"=",
"None",
")",
":",
"pxs",
"=",
"pxs",
"if",
"pxs",
"is",
"not",
"None",
"else",
"self",
".",
"pxs",
".",
"close",
"return",
"pxs",
"*",
"self",
".",
"multiplier"
] | Return the market value series for the series of pxs | [
"Return",
"the",
"market",
"value",
"series",
"for",
"the",
"series",
"of",
"pxs"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/ins.py#L85-L88 |
226,863 | bpsmith/tia | tia/analysis/model/ins.py | Instrument.get_eod_frame | def get_eod_frame(self):
"""Return the eod market data frame for pricing"""
close = self.pxs.close
mktval = self.get_mkt_val(close)
dvds = self.pxs.dvds
df = pd.DataFrame({'close': close, 'mkt_val': mktval, 'dvds': dvds})
df.index.name = 'date'
return df | python | def get_eod_frame(self):
close = self.pxs.close
mktval = self.get_mkt_val(close)
dvds = self.pxs.dvds
df = pd.DataFrame({'close': close, 'mkt_val': mktval, 'dvds': dvds})
df.index.name = 'date'
return df | [
"def",
"get_eod_frame",
"(",
"self",
")",
":",
"close",
"=",
"self",
".",
"pxs",
".",
"close",
"mktval",
"=",
"self",
".",
"get_mkt_val",
"(",
"close",
")",
"dvds",
"=",
"self",
".",
"pxs",
".",
"dvds",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
... | Return the eod market data frame for pricing | [
"Return",
"the",
"eod",
"market",
"data",
"frame",
"for",
"pricing"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/ins.py#L93-L100 |
226,864 | bpsmith/tia | tia/analysis/model/ins.py | Instrument.truncate | def truncate(self, before=None, after=None):
"""Return an instrument with prices starting at before and ending at after"""
pxframe = self.pxs.frame
if (before is None or before == pxframe.index[0]) and (after is None or after == pxframe.index[-1]):
return self
else:
tpxs = self.pxs.frame.truncate(before, after)
return Instrument(self.sid, InstrumentPrices(tpxs), multiplier=self.multiplier) | python | def truncate(self, before=None, after=None):
pxframe = self.pxs.frame
if (before is None or before == pxframe.index[0]) and (after is None or after == pxframe.index[-1]):
return self
else:
tpxs = self.pxs.frame.truncate(before, after)
return Instrument(self.sid, InstrumentPrices(tpxs), multiplier=self.multiplier) | [
"def",
"truncate",
"(",
"self",
",",
"before",
"=",
"None",
",",
"after",
"=",
"None",
")",
":",
"pxframe",
"=",
"self",
".",
"pxs",
".",
"frame",
"if",
"(",
"before",
"is",
"None",
"or",
"before",
"==",
"pxframe",
".",
"index",
"[",
"0",
"]",
")... | Return an instrument with prices starting at before and ending at after | [
"Return",
"an",
"instrument",
"with",
"prices",
"starting",
"at",
"before",
"and",
"ending",
"at",
"after"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/ins.py#L102-L109 |
226,865 | bpsmith/tia | tia/analysis/util.py | insert_level | def insert_level(df, label, level=0, copy=0, axis=0, level_name=None):
"""Add a new level to the index with the specified label. The newly created index will be a MultiIndex.
:param df: DataFrame
:param label: label to insert
:param copy: If True, copy the DataFrame before assigning new index
:param axis: If 0, then columns. If 1, then index
:return:
"""
df = df if not copy else df.copy()
src = df.columns if axis == 0 else df.index
current = [src.get_level_values(lvl) for lvl in range(src.nlevels)]
current.insert(level, [label] * len(src))
idx = pd.MultiIndex.from_arrays(current)
level_name and idx.set_names(level_name, level, inplace=1)
if axis == 0:
df.columns = idx
else:
df.index = idx
return df | python | def insert_level(df, label, level=0, copy=0, axis=0, level_name=None):
df = df if not copy else df.copy()
src = df.columns if axis == 0 else df.index
current = [src.get_level_values(lvl) for lvl in range(src.nlevels)]
current.insert(level, [label] * len(src))
idx = pd.MultiIndex.from_arrays(current)
level_name and idx.set_names(level_name, level, inplace=1)
if axis == 0:
df.columns = idx
else:
df.index = idx
return df | [
"def",
"insert_level",
"(",
"df",
",",
"label",
",",
"level",
"=",
"0",
",",
"copy",
"=",
"0",
",",
"axis",
"=",
"0",
",",
"level_name",
"=",
"None",
")",
":",
"df",
"=",
"df",
"if",
"not",
"copy",
"else",
"df",
".",
"copy",
"(",
")",
"src",
... | Add a new level to the index with the specified label. The newly created index will be a MultiIndex.
:param df: DataFrame
:param label: label to insert
:param copy: If True, copy the DataFrame before assigning new index
:param axis: If 0, then columns. If 1, then index
:return: | [
"Add",
"a",
"new",
"level",
"to",
"the",
"index",
"with",
"the",
"specified",
"label",
".",
"The",
"newly",
"created",
"index",
"will",
"be",
"a",
"MultiIndex",
"."
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/util.py#L105-L124 |
226,866 | bpsmith/tia | tia/analysis/talib_wrapper.py | APO | def APO(series, fast=12, slow=26, matype=0):
"""double exponential moving average"""
return _series_to_series(series, talib.APO, fast, slow, matype) | python | def APO(series, fast=12, slow=26, matype=0):
return _series_to_series(series, talib.APO, fast, slow, matype) | [
"def",
"APO",
"(",
"series",
",",
"fast",
"=",
"12",
",",
"slow",
"=",
"26",
",",
"matype",
"=",
"0",
")",
":",
"return",
"_series_to_series",
"(",
"series",
",",
"talib",
".",
"APO",
",",
"fast",
",",
"slow",
",",
"matype",
")"
] | double exponential moving average | [
"double",
"exponential",
"moving",
"average"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/talib_wrapper.py#L67-L69 |
226,867 | bpsmith/tia | tia/analysis/talib_wrapper.py | MAMA | def MAMA(series, fast=.5, slow=.05):
"""MESA Adaptive Moving Average"""
return _series_to_frame(series, ['MAMA', 'FAMA'], talib.MAMA, fast, slow) | python | def MAMA(series, fast=.5, slow=.05):
return _series_to_frame(series, ['MAMA', 'FAMA'], talib.MAMA, fast, slow) | [
"def",
"MAMA",
"(",
"series",
",",
"fast",
"=",
".5",
",",
"slow",
"=",
".05",
")",
":",
"return",
"_series_to_frame",
"(",
"series",
",",
"[",
"'MAMA'",
",",
"'FAMA'",
"]",
",",
"talib",
".",
"MAMA",
",",
"fast",
",",
"slow",
")"
] | MESA Adaptive Moving Average | [
"MESA",
"Adaptive",
"Moving",
"Average"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/talib_wrapper.py#L462-L464 |
226,868 | bpsmith/tia | tia/analysis/talib_wrapper.py | MFI | def MFI(frame, n=14, high_col='high', low_col='low', close_col='close', vol_col='Volume'):
"""money flow inedx"""
return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.MFI, n) | python | def MFI(frame, n=14, high_col='high', low_col='low', close_col='close', vol_col='Volume'):
return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.MFI, n) | [
"def",
"MFI",
"(",
"frame",
",",
"n",
"=",
"14",
",",
"high_col",
"=",
"'high'",
",",
"low_col",
"=",
"'low'",
",",
"close_col",
"=",
"'close'",
",",
"vol_col",
"=",
"'Volume'",
")",
":",
"return",
"_frame_to_series",
"(",
"frame",
",",
"[",
"high_col"... | money flow inedx | [
"money",
"flow",
"inedx"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/talib_wrapper.py#L475-L477 |
226,869 | bpsmith/tia | tia/analysis/model/trd.py | Trade.split | def split(self, amt):
""" return 2 trades, 1 with specific amt and the other with self.quantity - amt """
ratio = abs(amt / self.qty)
t1 = Trade(self.tid, self.ts, amt, self.px, fees=ratio * self.fees, **self.kwargs)
t2 = Trade(self.tid, self.ts, self.qty - amt, self.px, fees=(1. - ratio) * self.fees,
**self.kwargs)
return [t1, t2] | python | def split(self, amt):
ratio = abs(amt / self.qty)
t1 = Trade(self.tid, self.ts, amt, self.px, fees=ratio * self.fees, **self.kwargs)
t2 = Trade(self.tid, self.ts, self.qty - amt, self.px, fees=(1. - ratio) * self.fees,
**self.kwargs)
return [t1, t2] | [
"def",
"split",
"(",
"self",
",",
"amt",
")",
":",
"ratio",
"=",
"abs",
"(",
"amt",
"/",
"self",
".",
"qty",
")",
"t1",
"=",
"Trade",
"(",
"self",
".",
"tid",
",",
"self",
".",
"ts",
",",
"amt",
",",
"self",
".",
"px",
",",
"fees",
"=",
"ra... | return 2 trades, 1 with specific amt and the other with self.quantity - amt | [
"return",
"2",
"trades",
"1",
"with",
"specific",
"amt",
"and",
"the",
"other",
"with",
"self",
".",
"quantity",
"-",
"amt"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/model/trd.py#L21-L27 |
226,870 | bpsmith/tia | tia/rlab/table.py | pad_positive_wrapper | def pad_positive_wrapper(fmtfct):
"""Ensure that numbers are aligned in table by appending a blank space to postive values if 'parenthesis' are
used to denote negative numbers"""
def check_and_append(*args, **kwargs):
result = fmtfct(*args, **kwargs)
if fmtfct.parens and not result.endswith(')'):
result += ' '
return result
return check_and_append | python | def pad_positive_wrapper(fmtfct):
def check_and_append(*args, **kwargs):
result = fmtfct(*args, **kwargs)
if fmtfct.parens and not result.endswith(')'):
result += ' '
return result
return check_and_append | [
"def",
"pad_positive_wrapper",
"(",
"fmtfct",
")",
":",
"def",
"check_and_append",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"fmtfct",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"fmtfct",
".",
"parens",
"and",
"no... | Ensure that numbers are aligned in table by appending a blank space to postive values if 'parenthesis' are
used to denote negative numbers | [
"Ensure",
"that",
"numbers",
"are",
"aligned",
"in",
"table",
"by",
"appending",
"a",
"blank",
"space",
"to",
"postive",
"values",
"if",
"parenthesis",
"are",
"used",
"to",
"denote",
"negative",
"numbers"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L43-L53 |
226,871 | bpsmith/tia | tia/rlab/table.py | RegionFormatter.iter_rows | def iter_rows(self, start=None, end=None):
"""Iterate each of the Region rows in this region"""
start = start or 0
end = end or self.nrows
for i in range(start, end):
yield self.iloc[i, :] | python | def iter_rows(self, start=None, end=None):
start = start or 0
end = end or self.nrows
for i in range(start, end):
yield self.iloc[i, :] | [
"def",
"iter_rows",
"(",
"self",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"start",
"=",
"start",
"or",
"0",
"end",
"=",
"end",
"or",
"self",
".",
"nrows",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"end",
")",
":",
"yiel... | Iterate each of the Region rows in this region | [
"Iterate",
"each",
"of",
"the",
"Region",
"rows",
"in",
"this",
"region"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L361-L366 |
226,872 | bpsmith/tia | tia/rlab/table.py | RegionFormatter.iter_cols | def iter_cols(self, start=None, end=None):
"""Iterate each of the Region cols in this region"""
start = start or 0
end = end or self.ncols
for i in range(start, end):
yield self.iloc[:, i] | python | def iter_cols(self, start=None, end=None):
start = start or 0
end = end or self.ncols
for i in range(start, end):
yield self.iloc[:, i] | [
"def",
"iter_cols",
"(",
"self",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"start",
"=",
"start",
"or",
"0",
"end",
"=",
"end",
"or",
"self",
".",
"ncols",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"end",
")",
":",
"yiel... | Iterate each of the Region cols in this region | [
"Iterate",
"each",
"of",
"the",
"Region",
"cols",
"in",
"this",
"region"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L368-L373 |
226,873 | bpsmith/tia | tia/rlab/table.py | RegionFormatter.guess_number_format | def guess_number_format(self, rb=1, align=1, **fmt_args):
"""Determine the most appropriate formatter by inspected all the region values"""
fct = fmt.guess_formatter(self.actual_values, **fmt_args)
return self.apply_number_format(fct, rb=rb, align=align) | python | def guess_number_format(self, rb=1, align=1, **fmt_args):
fct = fmt.guess_formatter(self.actual_values, **fmt_args)
return self.apply_number_format(fct, rb=rb, align=align) | [
"def",
"guess_number_format",
"(",
"self",
",",
"rb",
"=",
"1",
",",
"align",
"=",
"1",
",",
"*",
"*",
"fmt_args",
")",
":",
"fct",
"=",
"fmt",
".",
"guess_formatter",
"(",
"self",
".",
"actual_values",
",",
"*",
"*",
"fmt_args",
")",
"return",
"self... | Determine the most appropriate formatter by inspected all the region values | [
"Determine",
"the",
"most",
"appropriate",
"formatter",
"by",
"inspected",
"all",
"the",
"region",
"values"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L554-L557 |
226,874 | bpsmith/tia | tia/rlab/table.py | RegionFormatter.dynamic_number_format | def dynamic_number_format(self, rb=1, align=1, **fmt_args):
"""Formatter changes based on the cell value"""
fct = fmt.DynamicNumberFormatter(**fmt_args)
return self.apply_number_format(fct, rb=rb, align=align) | python | def dynamic_number_format(self, rb=1, align=1, **fmt_args):
fct = fmt.DynamicNumberFormatter(**fmt_args)
return self.apply_number_format(fct, rb=rb, align=align) | [
"def",
"dynamic_number_format",
"(",
"self",
",",
"rb",
"=",
"1",
",",
"align",
"=",
"1",
",",
"*",
"*",
"fmt_args",
")",
":",
"fct",
"=",
"fmt",
".",
"DynamicNumberFormatter",
"(",
"*",
"*",
"fmt_args",
")",
"return",
"self",
".",
"apply_number_format",... | Formatter changes based on the cell value | [
"Formatter",
"changes",
"based",
"on",
"the",
"cell",
"value"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L568-L571 |
226,875 | bpsmith/tia | tia/analysis/pdf_rpts.py | ShortTermReport.add_summary_page | def add_summary_page(self):
"""Build a table which is shown on the first page which gives an overview of the portfolios"""
s = PortfolioSummary()
s.include_long_short()
pieces = []
for r in self.results:
tmp = s(r.port, PortfolioSummary.analyze_returns)
tmp['desc'] = r.desc
tmp['sid'] = r.sid
tmp = tmp.set_index(['sid', 'desc'], append=1).reorder_levels([2, 1, 0])
pieces.append(tmp)
frame = pd.concat(pieces)
tf = self.pdf.table_formatter(frame)
tf.apply_basic_style(cmap=self.table_style)
# [col.guess_format(pcts=1, trunc_dot_zeros=1) for col in tf.cells.iter_cols()]
tf.cells.match_column_labels(['nmonths', 'cnt', 'win cnt', 'lose cnt', 'dur max']).int_format()
tf.cells.match_column_labels(['sharpe ann', 'sortino', 'dur avg']).float_format(precision=1)
tf.cells.match_column_labels(['maxdd dt']).apply_format(new_datetime_formatter('%d-%b-%y'))
tf.cells.match_column_labels(['cagr', 'mret avg', 'mret std ann', 'ret std', 'mret avg ann', 'maxdd', 'avg dd',
'winpct', 'ret avg', 'ret min', 'ret max']).percent_format()
self.pdf.build_page('summary', {'F1': tf.build()}) | python | def add_summary_page(self):
s = PortfolioSummary()
s.include_long_short()
pieces = []
for r in self.results:
tmp = s(r.port, PortfolioSummary.analyze_returns)
tmp['desc'] = r.desc
tmp['sid'] = r.sid
tmp = tmp.set_index(['sid', 'desc'], append=1).reorder_levels([2, 1, 0])
pieces.append(tmp)
frame = pd.concat(pieces)
tf = self.pdf.table_formatter(frame)
tf.apply_basic_style(cmap=self.table_style)
# [col.guess_format(pcts=1, trunc_dot_zeros=1) for col in tf.cells.iter_cols()]
tf.cells.match_column_labels(['nmonths', 'cnt', 'win cnt', 'lose cnt', 'dur max']).int_format()
tf.cells.match_column_labels(['sharpe ann', 'sortino', 'dur avg']).float_format(precision=1)
tf.cells.match_column_labels(['maxdd dt']).apply_format(new_datetime_formatter('%d-%b-%y'))
tf.cells.match_column_labels(['cagr', 'mret avg', 'mret std ann', 'ret std', 'mret avg ann', 'maxdd', 'avg dd',
'winpct', 'ret avg', 'ret min', 'ret max']).percent_format()
self.pdf.build_page('summary', {'F1': tf.build()}) | [
"def",
"add_summary_page",
"(",
"self",
")",
":",
"s",
"=",
"PortfolioSummary",
"(",
")",
"s",
".",
"include_long_short",
"(",
")",
"pieces",
"=",
"[",
"]",
"for",
"r",
"in",
"self",
".",
"results",
":",
"tmp",
"=",
"s",
"(",
"r",
".",
"port",
",",... | Build a table which is shown on the first page which gives an overview of the portfolios | [
"Build",
"a",
"table",
"which",
"is",
"shown",
"on",
"the",
"first",
"page",
"which",
"gives",
"an",
"overview",
"of",
"the",
"portfolios"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/analysis/pdf_rpts.py#L73-L95 |
226,876 | bpsmith/tia | tia/bbg/v3api.py | Request.set_flag | def set_flag(self, request, val, fld):
"""If the specified val is not None, then set the specified field to its boolean value"""
if val is not None:
val = bool(val)
request.set(fld, val) | python | def set_flag(self, request, val, fld):
if val is not None:
val = bool(val)
request.set(fld, val) | [
"def",
"set_flag",
"(",
"self",
",",
"request",
",",
"val",
",",
"fld",
")",
":",
"if",
"val",
"is",
"not",
"None",
":",
"val",
"=",
"bool",
"(",
"val",
")",
"request",
".",
"set",
"(",
"fld",
",",
"val",
")"
] | If the specified val is not None, then set the specified field to its boolean value | [
"If",
"the",
"specified",
"val",
"is",
"not",
"None",
"then",
"set",
"the",
"specified",
"field",
"to",
"its",
"boolean",
"value"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/bbg/v3api.py#L238-L242 |
226,877 | bpsmith/tia | tia/bbg/bbg_com.py | XmlHelper.as_security_error | def as_security_error(node, secid):
""" convert the securityError element to a SecurityError """
assert node.Name == 'securityError'
src = XmlHelper.get_child_value(node, 'source')
code = XmlHelper.get_child_value(node, 'code')
cat = XmlHelper.get_child_value(node, 'category')
msg = XmlHelper.get_child_value(node, 'message')
subcat = XmlHelper.get_child_value(node, 'subcategory')
return SecurityError(security=secid, source=src, code=code, category=cat, message=msg, subcategory=subcat) | python | def as_security_error(node, secid):
assert node.Name == 'securityError'
src = XmlHelper.get_child_value(node, 'source')
code = XmlHelper.get_child_value(node, 'code')
cat = XmlHelper.get_child_value(node, 'category')
msg = XmlHelper.get_child_value(node, 'message')
subcat = XmlHelper.get_child_value(node, 'subcategory')
return SecurityError(security=secid, source=src, code=code, category=cat, message=msg, subcategory=subcat) | [
"def",
"as_security_error",
"(",
"node",
",",
"secid",
")",
":",
"assert",
"node",
".",
"Name",
"==",
"'securityError'",
"src",
"=",
"XmlHelper",
".",
"get_child_value",
"(",
"node",
",",
"'source'",
")",
"code",
"=",
"XmlHelper",
".",
"get_child_value",
"("... | convert the securityError element to a SecurityError | [
"convert",
"the",
"securityError",
"element",
"to",
"a",
"SecurityError"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/bbg/bbg_com.py#L118-L126 |
226,878 | bpsmith/tia | tia/bbg/bbg_com.py | ResponseHandler.do_init | def do_init(self, handler):
""" will be called prior to waiting for the message """
self.waiting = True
self.exc_info = None
self.handler = handler | python | def do_init(self, handler):
self.waiting = True
self.exc_info = None
self.handler = handler | [
"def",
"do_init",
"(",
"self",
",",
"handler",
")",
":",
"self",
".",
"waiting",
"=",
"True",
"self",
".",
"exc_info",
"=",
"None",
"self",
".",
"handler",
"=",
"handler"
] | will be called prior to waiting for the message | [
"will",
"be",
"called",
"prior",
"to",
"waiting",
"for",
"the",
"message"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/bbg/bbg_com.py#L178-L182 |
226,879 | bpsmith/tia | tia/bbg/bbg_com.py | ReferenceDataRequest.response_as_series | def response_as_series(self):
""" Return the response as a single series """
assert len(self.symbols) == 1, 'expected single request'
if self.response_type == 'frame':
return self.response.ix[self.symbols[0]]
else:
return pandas.Series(self.response[self.symbols]) | python | def response_as_series(self):
assert len(self.symbols) == 1, 'expected single request'
if self.response_type == 'frame':
return self.response.ix[self.symbols[0]]
else:
return pandas.Series(self.response[self.symbols]) | [
"def",
"response_as_series",
"(",
"self",
")",
":",
"assert",
"len",
"(",
"self",
".",
"symbols",
")",
"==",
"1",
",",
"'expected single request'",
"if",
"self",
".",
"response_type",
"==",
"'frame'",
":",
"return",
"self",
".",
"response",
".",
"ix",
"[",... | Return the response as a single series | [
"Return",
"the",
"response",
"as",
"a",
"single",
"series"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/bbg/bbg_com.py#L336-L342 |
226,880 | bpsmith/tia | tia/bbg/bbg_com.py | HistoricalDataRequest.response_as_single | def response_as_single(self, copy=0):
""" convert the response map to a single data frame with Multi-Index columns """
arr = []
for sid, frame in self.response.iteritems():
if copy:
frame = frame.copy()
'security' not in frame and frame.insert(0, 'security', sid)
arr.append(frame.reset_index().set_index(['date', 'security']))
return concat(arr).unstack() | python | def response_as_single(self, copy=0):
arr = []
for sid, frame in self.response.iteritems():
if copy:
frame = frame.copy()
'security' not in frame and frame.insert(0, 'security', sid)
arr.append(frame.reset_index().set_index(['date', 'security']))
return concat(arr).unstack() | [
"def",
"response_as_single",
"(",
"self",
",",
"copy",
"=",
"0",
")",
":",
"arr",
"=",
"[",
"]",
"for",
"sid",
",",
"frame",
"in",
"self",
".",
"response",
".",
"iteritems",
"(",
")",
":",
"if",
"copy",
":",
"frame",
"=",
"frame",
".",
"copy",
"(... | convert the response map to a single data frame with Multi-Index columns | [
"convert",
"the",
"response",
"map",
"to",
"a",
"single",
"data",
"frame",
"with",
"Multi",
"-",
"Index",
"columns"
] | a7043b6383e557aeea8fc7112bbffd6e36a230e9 | https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/bbg/bbg_com.py#L430-L438 |
226,881 | metric-learn/metric-learn | metric_learn/lsml.py | LSML_Supervised.fit | def fit(self, X, y, random_state=np.random):
"""Create constraints from labels and learn the LSML model.
Parameters
----------
X : (n x d) matrix
Input data, where each row corresponds to a single instance.
y : (n) array-like
Data labels.
random_state : numpy.random.RandomState, optional
If provided, controls random number generation.
"""
if self.num_labeled != 'deprecated':
warnings.warn('"num_labeled" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0', DeprecationWarning)
X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
num_constraints = self.num_constraints
if num_constraints is None:
num_classes = len(np.unique(y))
num_constraints = 20 * num_classes**2
c = Constraints(y)
pos_neg = c.positive_negative_pairs(num_constraints, same_length=True,
random_state=random_state)
return _BaseLSML._fit(self, X[np.column_stack(pos_neg)],
weights=self.weights) | python | def fit(self, X, y, random_state=np.random):
if self.num_labeled != 'deprecated':
warnings.warn('"num_labeled" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0', DeprecationWarning)
X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
num_constraints = self.num_constraints
if num_constraints is None:
num_classes = len(np.unique(y))
num_constraints = 20 * num_classes**2
c = Constraints(y)
pos_neg = c.positive_negative_pairs(num_constraints, same_length=True,
random_state=random_state)
return _BaseLSML._fit(self, X[np.column_stack(pos_neg)],
weights=self.weights) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"random_state",
"=",
"np",
".",
"random",
")",
":",
"if",
"self",
".",
"num_labeled",
"!=",
"'deprecated'",
":",
"warnings",
".",
"warn",
"(",
"'\"num_labeled\" parameter is not used.'",
"' It has been deprec... | Create constraints from labels and learn the LSML model.
Parameters
----------
X : (n x d) matrix
Input data, where each row corresponds to a single instance.
y : (n) array-like
Data labels.
random_state : numpy.random.RandomState, optional
If provided, controls random number generation. | [
"Create",
"constraints",
"from",
"labels",
"and",
"learn",
"the",
"LSML",
"model",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/lsml.py#L220-L248 |
226,882 | metric-learn/metric-learn | metric_learn/mlkr.py | MLKR.fit | def fit(self, X, y):
"""
Fit MLKR model
Parameters
----------
X : (n x d) array of samples
y : (n) data labels
"""
X, y = self._prepare_inputs(X, y, y_numeric=True,
ensure_min_samples=2)
n, d = X.shape
if y.shape[0] != n:
raise ValueError('Data and label lengths mismatch: %d != %d'
% (n, y.shape[0]))
A = self.A0
m = self.num_dims
if m is None:
m = d
if A is None:
# initialize to PCA transformation matrix
# note: not the same as n_components=m !
A = PCA().fit(X).components_.T[:m]
elif A.shape != (m, d):
raise ValueError('A0 needs shape (%d,%d) but got %s' % (
m, d, A.shape))
# Measure the total training time
train_time = time.time()
self.n_iter_ = 0
res = minimize(self._loss, A.ravel(), (X, y), method='L-BFGS-B',
jac=True, tol=self.tol,
options=dict(maxiter=self.max_iter))
self.transformer_ = res.x.reshape(A.shape)
# Stop timer
train_time = time.time() - train_time
if self.verbose:
cls_name = self.__class__.__name__
# Warn the user if the algorithm did not converge
if not res.success:
warnings.warn('[{}] MLKR did not converge: {}'
.format(cls_name, res.message), ConvergenceWarning)
print('[{}] Training took {:8.2f}s.'.format(cls_name, train_time))
return self | python | def fit(self, X, y):
X, y = self._prepare_inputs(X, y, y_numeric=True,
ensure_min_samples=2)
n, d = X.shape
if y.shape[0] != n:
raise ValueError('Data and label lengths mismatch: %d != %d'
% (n, y.shape[0]))
A = self.A0
m = self.num_dims
if m is None:
m = d
if A is None:
# initialize to PCA transformation matrix
# note: not the same as n_components=m !
A = PCA().fit(X).components_.T[:m]
elif A.shape != (m, d):
raise ValueError('A0 needs shape (%d,%d) but got %s' % (
m, d, A.shape))
# Measure the total training time
train_time = time.time()
self.n_iter_ = 0
res = minimize(self._loss, A.ravel(), (X, y), method='L-BFGS-B',
jac=True, tol=self.tol,
options=dict(maxiter=self.max_iter))
self.transformer_ = res.x.reshape(A.shape)
# Stop timer
train_time = time.time() - train_time
if self.verbose:
cls_name = self.__class__.__name__
# Warn the user if the algorithm did not converge
if not res.success:
warnings.warn('[{}] MLKR did not converge: {}'
.format(cls_name, res.message), ConvergenceWarning)
print('[{}] Training took {:8.2f}s.'.format(cls_name, train_time))
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"X",
",",
"y",
"=",
"self",
".",
"_prepare_inputs",
"(",
"X",
",",
"y",
",",
"y_numeric",
"=",
"True",
",",
"ensure_min_samples",
"=",
"2",
")",
"n",
",",
"d",
"=",
"X",
".",
"shape",
"... | Fit MLKR model
Parameters
----------
X : (n x d) array of samples
y : (n) data labels | [
"Fit",
"MLKR",
"model"
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/mlkr.py#L73-L120 |
226,883 | metric-learn/metric-learn | metric_learn/constraints.py | Constraints.chunks | def chunks(self, num_chunks=100, chunk_size=2, random_state=np.random):
"""
the random state object to be passed must be a numpy random seed
"""
chunks = -np.ones_like(self.known_label_idx, dtype=int)
uniq, lookup = np.unique(self.known_labels, return_inverse=True)
all_inds = [set(np.where(lookup==c)[0]) for c in xrange(len(uniq))]
idx = 0
while idx < num_chunks and all_inds:
if len(all_inds) == 1:
c = 0
else:
c = random_state.randint(0, high=len(all_inds)-1)
inds = all_inds[c]
if len(inds) < chunk_size:
del all_inds[c]
continue
ii = random_state.choice(list(inds), chunk_size, replace=False)
inds.difference_update(ii)
chunks[ii] = idx
idx += 1
if idx < num_chunks:
raise ValueError('Unable to make %d chunks of %d examples each' %
(num_chunks, chunk_size))
return chunks | python | def chunks(self, num_chunks=100, chunk_size=2, random_state=np.random):
chunks = -np.ones_like(self.known_label_idx, dtype=int)
uniq, lookup = np.unique(self.known_labels, return_inverse=True)
all_inds = [set(np.where(lookup==c)[0]) for c in xrange(len(uniq))]
idx = 0
while idx < num_chunks and all_inds:
if len(all_inds) == 1:
c = 0
else:
c = random_state.randint(0, high=len(all_inds)-1)
inds = all_inds[c]
if len(inds) < chunk_size:
del all_inds[c]
continue
ii = random_state.choice(list(inds), chunk_size, replace=False)
inds.difference_update(ii)
chunks[ii] = idx
idx += 1
if idx < num_chunks:
raise ValueError('Unable to make %d chunks of %d examples each' %
(num_chunks, chunk_size))
return chunks | [
"def",
"chunks",
"(",
"self",
",",
"num_chunks",
"=",
"100",
",",
"chunk_size",
"=",
"2",
",",
"random_state",
"=",
"np",
".",
"random",
")",
":",
"chunks",
"=",
"-",
"np",
".",
"ones_like",
"(",
"self",
".",
"known_label_idx",
",",
"dtype",
"=",
"in... | the random state object to be passed must be a numpy random seed | [
"the",
"random",
"state",
"object",
"to",
"be",
"passed",
"must",
"be",
"a",
"numpy",
"random",
"seed"
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/constraints.py#L66-L90 |
226,884 | metric-learn/metric-learn | metric_learn/mmc.py | _BaseMMC._fD | def _fD(self, neg_pairs, A):
"""The value of the dissimilarity constraint function.
f = f(\sum_{ij \in D} distance(x_i, x_j))
i.e. distance can be L1: \sqrt{(x_i-x_j)A(x_i-x_j)'}
"""
diff = neg_pairs[:, 0, :] - neg_pairs[:, 1, :]
return np.log(np.sum(np.sqrt(np.sum(np.dot(diff, A) * diff, axis=1))) + 1e-6) | python | def _fD(self, neg_pairs, A):
diff = neg_pairs[:, 0, :] - neg_pairs[:, 1, :]
return np.log(np.sum(np.sqrt(np.sum(np.dot(diff, A) * diff, axis=1))) + 1e-6) | [
"def",
"_fD",
"(",
"self",
",",
"neg_pairs",
",",
"A",
")",
":",
"diff",
"=",
"neg_pairs",
"[",
":",
",",
"0",
",",
":",
"]",
"-",
"neg_pairs",
"[",
":",
",",
"1",
",",
":",
"]",
"return",
"np",
".",
"log",
"(",
"np",
".",
"sum",
"(",
"np",... | The value of the dissimilarity constraint function.
f = f(\sum_{ij \in D} distance(x_i, x_j))
i.e. distance can be L1: \sqrt{(x_i-x_j)A(x_i-x_j)'} | [
"The",
"value",
"of",
"the",
"dissimilarity",
"constraint",
"function",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/mmc.py#L273-L280 |
226,885 | metric-learn/metric-learn | metric_learn/mmc.py | _BaseMMC._fD1 | def _fD1(self, neg_pairs, A):
"""The gradient of the dissimilarity constraint function w.r.t. A.
For example, let distance by L1 norm:
f = f(\sum_{ij \in D} \sqrt{(x_i-x_j)A(x_i-x_j)'})
df/dA_{kl} = f'* d(\sum_{ij \in D} \sqrt{(x_i-x_j)^k*(x_i-x_j)^l})/dA_{kl}
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
df/dA = f'(\sum_{ij \in D} \sqrt{tr(d_ij'*d_ij*A)})
* 0.5*(\sum_{ij \in D} (1/sqrt{tr(d_ij'*d_ij*A)})*(d_ij'*d_ij))
"""
dim = neg_pairs.shape[2]
diff = neg_pairs[:, 0, :] - neg_pairs[:, 1, :]
# outer products of all rows in `diff`
M = np.einsum('ij,ik->ijk', diff, diff)
# faster version of: dist = np.sqrt(np.sum(M * A[None,:,:], axis=(1,2)))
dist = np.sqrt(np.einsum('ijk,jk', M, A))
# faster version of: sum_deri = np.sum(M / (2 * (dist[:,None,None] + 1e-6)), axis=0)
sum_deri = np.einsum('ijk,i->jk', M, 0.5 / (dist + 1e-6))
sum_dist = dist.sum()
return sum_deri / (sum_dist + 1e-6) | python | def _fD1(self, neg_pairs, A):
dim = neg_pairs.shape[2]
diff = neg_pairs[:, 0, :] - neg_pairs[:, 1, :]
# outer products of all rows in `diff`
M = np.einsum('ij,ik->ijk', diff, diff)
# faster version of: dist = np.sqrt(np.sum(M * A[None,:,:], axis=(1,2)))
dist = np.sqrt(np.einsum('ijk,jk', M, A))
# faster version of: sum_deri = np.sum(M / (2 * (dist[:,None,None] + 1e-6)), axis=0)
sum_deri = np.einsum('ijk,i->jk', M, 0.5 / (dist + 1e-6))
sum_dist = dist.sum()
return sum_deri / (sum_dist + 1e-6) | [
"def",
"_fD1",
"(",
"self",
",",
"neg_pairs",
",",
"A",
")",
":",
"dim",
"=",
"neg_pairs",
".",
"shape",
"[",
"2",
"]",
"diff",
"=",
"neg_pairs",
"[",
":",
",",
"0",
",",
":",
"]",
"-",
"neg_pairs",
"[",
":",
",",
"1",
",",
":",
"]",
"# outer... | The gradient of the dissimilarity constraint function w.r.t. A.
For example, let distance by L1 norm:
f = f(\sum_{ij \in D} \sqrt{(x_i-x_j)A(x_i-x_j)'})
df/dA_{kl} = f'* d(\sum_{ij \in D} \sqrt{(x_i-x_j)^k*(x_i-x_j)^l})/dA_{kl}
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
df/dA = f'(\sum_{ij \in D} \sqrt{tr(d_ij'*d_ij*A)})
* 0.5*(\sum_{ij \in D} (1/sqrt{tr(d_ij'*d_ij*A)})*(d_ij'*d_ij)) | [
"The",
"gradient",
"of",
"the",
"dissimilarity",
"constraint",
"function",
"w",
".",
"r",
".",
"t",
".",
"A",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/mmc.py#L282-L303 |
226,886 | metric-learn/metric-learn | metric_learn/mmc.py | _BaseMMC._fS1 | def _fS1(self, pos_pairs, A):
"""The gradient of the similarity constraint function w.r.t. A.
f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij'
df/dA = d(d_ij*A*d_ij')/dA
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij
"""
dim = pos_pairs.shape[2]
diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]
return np.einsum('ij,ik->jk', diff, diff) | python | def _fS1(self, pos_pairs, A):
dim = pos_pairs.shape[2]
diff = pos_pairs[:, 0, :] - pos_pairs[:, 1, :]
return np.einsum('ij,ik->jk', diff, diff) | [
"def",
"_fS1",
"(",
"self",
",",
"pos_pairs",
",",
"A",
")",
":",
"dim",
"=",
"pos_pairs",
".",
"shape",
"[",
"2",
"]",
"diff",
"=",
"pos_pairs",
"[",
":",
",",
"0",
",",
":",
"]",
"-",
"pos_pairs",
"[",
":",
",",
"1",
",",
":",
"]",
"return"... | The gradient of the similarity constraint function w.r.t. A.
f = \sum_{ij}(x_i-x_j)A(x_i-x_j)' = \sum_{ij}d_ij*A*d_ij'
df/dA = d(d_ij*A*d_ij')/dA
Note that d_ij*A*d_ij' = tr(d_ij*A*d_ij') = tr(d_ij'*d_ij*A)
so, d(d_ij*A*d_ij')/dA = d_ij'*d_ij | [
"The",
"gradient",
"of",
"the",
"similarity",
"constraint",
"function",
"w",
".",
"r",
".",
"t",
".",
"A",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/mmc.py#L305-L316 |
226,887 | metric-learn/metric-learn | metric_learn/mmc.py | MMC.fit | def fit(self, pairs, y, calibration_params=None):
"""Learn the MMC model.
The threshold will be calibrated on the trainset using the parameters
`calibration_params`.
Parameters
----------
pairs : array-like, shape=(n_constraints, 2, n_features) or
(n_constraints, 2)
3D Array of pairs with each row corresponding to two points,
or 2D array of indices of pairs if the metric learner uses a
preprocessor.
y : array-like, of shape (n_constraints,)
Labels of constraints. Should be -1 for dissimilar pair, 1 for similar.
calibration_params : `dict` or `None`
Dictionary of parameters to give to `calibrate_threshold` for the
threshold calibration step done at the end of `fit`. If `None` is
given, `calibrate_threshold` will use the default parameters.
Returns
-------
self : object
Returns the instance.
"""
calibration_params = (calibration_params if calibration_params is not
None else dict())
self._validate_calibration_params(**calibration_params)
self._fit(pairs, y)
self.calibrate_threshold(pairs, y, **calibration_params)
return self | python | def fit(self, pairs, y, calibration_params=None):
calibration_params = (calibration_params if calibration_params is not
None else dict())
self._validate_calibration_params(**calibration_params)
self._fit(pairs, y)
self.calibrate_threshold(pairs, y, **calibration_params)
return self | [
"def",
"fit",
"(",
"self",
",",
"pairs",
",",
"y",
",",
"calibration_params",
"=",
"None",
")",
":",
"calibration_params",
"=",
"(",
"calibration_params",
"if",
"calibration_params",
"is",
"not",
"None",
"else",
"dict",
"(",
")",
")",
"self",
".",
"_valida... | Learn the MMC model.
The threshold will be calibrated on the trainset using the parameters
`calibration_params`.
Parameters
----------
pairs : array-like, shape=(n_constraints, 2, n_features) or
(n_constraints, 2)
3D Array of pairs with each row corresponding to two points,
or 2D array of indices of pairs if the metric learner uses a
preprocessor.
y : array-like, of shape (n_constraints,)
Labels of constraints. Should be -1 for dissimilar pair, 1 for similar.
calibration_params : `dict` or `None`
Dictionary of parameters to give to `calibrate_threshold` for the
threshold calibration step done at the end of `fit`. If `None` is
given, `calibrate_threshold` will use the default parameters.
Returns
-------
self : object
Returns the instance. | [
"Learn",
"the",
"MMC",
"model",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/mmc.py#L369-L398 |
226,888 | metric-learn/metric-learn | metric_learn/mmc.py | MMC_Supervised.fit | def fit(self, X, y, random_state=np.random):
"""Create constraints from labels and learn the MMC model.
Parameters
----------
X : (n x d) matrix
Input data, where each row corresponds to a single instance.
y : (n) array-like
Data labels.
random_state : numpy.random.RandomState, optional
If provided, controls random number generation.
"""
if self.num_labeled != 'deprecated':
warnings.warn('"num_labeled" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0', DeprecationWarning)
X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
num_constraints = self.num_constraints
if num_constraints is None:
num_classes = len(np.unique(y))
num_constraints = 20 * num_classes**2
c = Constraints(y)
pos_neg = c.positive_negative_pairs(num_constraints,
random_state=random_state)
pairs, y = wrap_pairs(X, pos_neg)
return _BaseMMC._fit(self, pairs, y) | python | def fit(self, X, y, random_state=np.random):
if self.num_labeled != 'deprecated':
warnings.warn('"num_labeled" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
'removed in 0.6.0', DeprecationWarning)
X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
num_constraints = self.num_constraints
if num_constraints is None:
num_classes = len(np.unique(y))
num_constraints = 20 * num_classes**2
c = Constraints(y)
pos_neg = c.positive_negative_pairs(num_constraints,
random_state=random_state)
pairs, y = wrap_pairs(X, pos_neg)
return _BaseMMC._fit(self, pairs, y) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"random_state",
"=",
"np",
".",
"random",
")",
":",
"if",
"self",
".",
"num_labeled",
"!=",
"'deprecated'",
":",
"warnings",
".",
"warn",
"(",
"'\"num_labeled\" parameter is not used.'",
"' It has been deprec... | Create constraints from labels and learn the MMC model.
Parameters
----------
X : (n x d) matrix
Input data, where each row corresponds to a single instance.
y : (n) array-like
Data labels.
random_state : numpy.random.RandomState, optional
If provided, controls random number generation. | [
"Create",
"constraints",
"from",
"labels",
"and",
"learn",
"the",
"MMC",
"model",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/mmc.py#L457-L483 |
226,889 | metric-learn/metric-learn | metric_learn/lfda.py | LFDA.fit | def fit(self, X, y):
'''Fit the LFDA model.
Parameters
----------
X : (n, d) array-like
Input data.
y : (n,) array-like
Class labels, one per point of data.
'''
X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
unique_classes, y = np.unique(y, return_inverse=True)
n, d = X.shape
num_classes = len(unique_classes)
if self.num_dims is None:
dim = d
else:
if not 0 < self.num_dims <= d:
raise ValueError('Invalid num_dims, must be in [1,%d]' % d)
dim = self.num_dims
if self.k is None:
k = min(7, d - 1)
elif self.k >= d:
warnings.warn('Chosen k (%d) too large, using %d instead.' % (self.k,d-1))
k = d - 1
else:
k = int(self.k)
tSb = np.zeros((d,d))
tSw = np.zeros((d,d))
for c in xrange(num_classes):
Xc = X[y==c]
nc = Xc.shape[0]
# classwise affinity matrix
dist = pairwise_distances(Xc, metric='l2', squared=True)
# distances to k-th nearest neighbor
k = min(k, nc - 1)
sigma = np.sqrt(np.partition(dist, k, axis=0)[:, k])
local_scale = np.outer(sigma, sigma)
with np.errstate(divide='ignore', invalid='ignore'):
A = np.exp(-dist/local_scale)
A[local_scale==0] = 0
G = Xc.T.dot(A.sum(axis=0)[:,None] * Xc) - Xc.T.dot(A).dot(Xc)
tSb += G/n + (1-nc/n)*Xc.T.dot(Xc) + _sum_outer(Xc)/n
tSw += G/nc
tSb -= _sum_outer(X)/n - tSw
# symmetrize
tSb = (tSb + tSb.T) / 2
tSw = (tSw + tSw.T) / 2
vals, vecs = _eigh(tSb, tSw, dim)
order = np.argsort(-vals)[:dim]
vals = vals[order].real
vecs = vecs[:,order]
if self.embedding_type == 'weighted':
vecs *= np.sqrt(vals)
elif self.embedding_type == 'orthonormalized':
vecs, _ = np.linalg.qr(vecs)
self.transformer_ = vecs.T
return self | python | def fit(self, X, y):
'''Fit the LFDA model.
Parameters
----------
X : (n, d) array-like
Input data.
y : (n,) array-like
Class labels, one per point of data.
'''
X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
unique_classes, y = np.unique(y, return_inverse=True)
n, d = X.shape
num_classes = len(unique_classes)
if self.num_dims is None:
dim = d
else:
if not 0 < self.num_dims <= d:
raise ValueError('Invalid num_dims, must be in [1,%d]' % d)
dim = self.num_dims
if self.k is None:
k = min(7, d - 1)
elif self.k >= d:
warnings.warn('Chosen k (%d) too large, using %d instead.' % (self.k,d-1))
k = d - 1
else:
k = int(self.k)
tSb = np.zeros((d,d))
tSw = np.zeros((d,d))
for c in xrange(num_classes):
Xc = X[y==c]
nc = Xc.shape[0]
# classwise affinity matrix
dist = pairwise_distances(Xc, metric='l2', squared=True)
# distances to k-th nearest neighbor
k = min(k, nc - 1)
sigma = np.sqrt(np.partition(dist, k, axis=0)[:, k])
local_scale = np.outer(sigma, sigma)
with np.errstate(divide='ignore', invalid='ignore'):
A = np.exp(-dist/local_scale)
A[local_scale==0] = 0
G = Xc.T.dot(A.sum(axis=0)[:,None] * Xc) - Xc.T.dot(A).dot(Xc)
tSb += G/n + (1-nc/n)*Xc.T.dot(Xc) + _sum_outer(Xc)/n
tSw += G/nc
tSb -= _sum_outer(X)/n - tSw
# symmetrize
tSb = (tSb + tSb.T) / 2
tSw = (tSw + tSw.T) / 2
vals, vecs = _eigh(tSb, tSw, dim)
order = np.argsort(-vals)[:dim]
vals = vals[order].real
vecs = vecs[:,order]
if self.embedding_type == 'weighted':
vecs *= np.sqrt(vals)
elif self.embedding_type == 'orthonormalized':
vecs, _ = np.linalg.qr(vecs)
self.transformer_ = vecs.T
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"X",
",",
"y",
"=",
"self",
".",
"_prepare_inputs",
"(",
"X",
",",
"y",
",",
"ensure_min_samples",
"=",
"2",
")",
"unique_classes",
",",
"y",
"=",
"np",
".",
"unique",
"(",
"y",
",",
"ret... | Fit the LFDA model.
Parameters
----------
X : (n, d) array-like
Input data.
y : (n,) array-like
Class labels, one per point of data. | [
"Fit",
"the",
"LFDA",
"model",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/lfda.py#L65-L134 |
226,890 | metric-learn/metric-learn | metric_learn/base_metric.py | BaseMetricLearner._prepare_inputs | def _prepare_inputs(self, X, y=None, type_of_inputs='classic',
**kwargs):
"""Initializes the preprocessor and processes inputs. See `check_input`
for more details.
Parameters
----------
input: array-like
The input data array to check.
y : array-like
The input labels array to check.
type_of_inputs: `str` {'classic', 'tuples'}
The type of inputs to check. If 'classic', the input should be
a 2D array-like of points or a 1D array like of indicators of points. If
'tuples', the input should be a 3D array-like of tuples or a 2D
array-like of indicators of tuples.
**kwargs: dict
Arguments to pass to check_input.
Returns
-------
X : `numpy.ndarray`
The checked input data array.
y: `numpy.ndarray` (optional)
The checked input labels array.
"""
self.check_preprocessor()
return check_input(X, y,
type_of_inputs=type_of_inputs,
preprocessor=self.preprocessor_,
estimator=self,
tuple_size=getattr(self, '_tuple_size', None),
**kwargs) | python | def _prepare_inputs(self, X, y=None, type_of_inputs='classic',
**kwargs):
self.check_preprocessor()
return check_input(X, y,
type_of_inputs=type_of_inputs,
preprocessor=self.preprocessor_,
estimator=self,
tuple_size=getattr(self, '_tuple_size', None),
**kwargs) | [
"def",
"_prepare_inputs",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"type_of_inputs",
"=",
"'classic'",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"check_preprocessor",
"(",
")",
"return",
"check_input",
"(",
"X",
",",
"y",
",",
"type_of_... | Initializes the preprocessor and processes inputs. See `check_input`
for more details.
Parameters
----------
input: array-like
The input data array to check.
y : array-like
The input labels array to check.
type_of_inputs: `str` {'classic', 'tuples'}
The type of inputs to check. If 'classic', the input should be
a 2D array-like of points or a 1D array like of indicators of points. If
'tuples', the input should be a 3D array-like of tuples or a 2D
array-like of indicators of tuples.
**kwargs: dict
Arguments to pass to check_input.
Returns
-------
X : `numpy.ndarray`
The checked input data array.
y: `numpy.ndarray` (optional)
The checked input labels array. | [
"Initializes",
"the",
"preprocessor",
"and",
"processes",
"inputs",
".",
"See",
"check_input",
"for",
"more",
"details",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/base_metric.py#L60-L96 |
226,891 | metric-learn/metric-learn | metric_learn/base_metric.py | MahalanobisMixin.score_pairs | def score_pairs(self, pairs):
"""Returns the learned Mahalanobis distance between pairs.
This distance is defined as: :math:`d_M(x, x') = \sqrt{(x-x')^T M (x-x')}`
where ``M`` is the learned Mahalanobis matrix, for every pair of points
``x`` and ``x'``. This corresponds to the euclidean distance between
embeddings of the points in a new space, obtained through a linear
transformation. Indeed, we have also: :math:`d_M(x, x') = \sqrt{(x_e -
x_e')^T (x_e- x_e')}`, with :math:`x_e = L x` (See
:class:`MahalanobisMixin`).
Parameters
----------
pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
3D Array of pairs to score, with each row corresponding to two points,
for 2D array of indices of pairs if the metric learner uses a
preprocessor.
Returns
-------
scores: `numpy.ndarray` of shape=(n_pairs,)
The learned Mahalanobis distance for every pair.
See Also
--------
get_metric : a method that returns a function to compute the metric between
two points. The difference with `score_pairs` is that it works on two 1D
arrays and cannot use a preprocessor. Besides, the returned function is
independent of the metric learner and hence is not modified if the metric
learner is.
:ref:`mahalanobis_distances` : The section of the project documentation
that describes Mahalanobis Distances.
"""
pairs = check_input(pairs, type_of_inputs='tuples',
preprocessor=self.preprocessor_,
estimator=self, tuple_size=2)
pairwise_diffs = self.transform(pairs[:, 1, :] - pairs[:, 0, :])
# (for MahalanobisMixin, the embedding is linear so we can just embed the
# difference)
return np.sqrt(np.sum(pairwise_diffs**2, axis=-1)) | python | def score_pairs(self, pairs):
pairs = check_input(pairs, type_of_inputs='tuples',
preprocessor=self.preprocessor_,
estimator=self, tuple_size=2)
pairwise_diffs = self.transform(pairs[:, 1, :] - pairs[:, 0, :])
# (for MahalanobisMixin, the embedding is linear so we can just embed the
# difference)
return np.sqrt(np.sum(pairwise_diffs**2, axis=-1)) | [
"def",
"score_pairs",
"(",
"self",
",",
"pairs",
")",
":",
"pairs",
"=",
"check_input",
"(",
"pairs",
",",
"type_of_inputs",
"=",
"'tuples'",
",",
"preprocessor",
"=",
"self",
".",
"preprocessor_",
",",
"estimator",
"=",
"self",
",",
"tuple_size",
"=",
"2"... | Returns the learned Mahalanobis distance between pairs.
This distance is defined as: :math:`d_M(x, x') = \sqrt{(x-x')^T M (x-x')}`
where ``M`` is the learned Mahalanobis matrix, for every pair of points
``x`` and ``x'``. This corresponds to the euclidean distance between
embeddings of the points in a new space, obtained through a linear
transformation. Indeed, we have also: :math:`d_M(x, x') = \sqrt{(x_e -
x_e')^T (x_e- x_e')}`, with :math:`x_e = L x` (See
:class:`MahalanobisMixin`).
Parameters
----------
pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
3D Array of pairs to score, with each row corresponding to two points,
for 2D array of indices of pairs if the metric learner uses a
preprocessor.
Returns
-------
scores: `numpy.ndarray` of shape=(n_pairs,)
The learned Mahalanobis distance for every pair.
See Also
--------
get_metric : a method that returns a function to compute the metric between
two points. The difference with `score_pairs` is that it works on two 1D
arrays and cannot use a preprocessor. Besides, the returned function is
independent of the metric learner and hence is not modified if the metric
learner is.
:ref:`mahalanobis_distances` : The section of the project documentation
that describes Mahalanobis Distances. | [
"Returns",
"the",
"learned",
"Mahalanobis",
"distance",
"between",
"pairs",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/base_metric.py#L179-L219 |
226,892 | metric-learn/metric-learn | metric_learn/base_metric.py | MahalanobisMixin.transform | def transform(self, X):
"""Embeds data points in the learned linear embedding space.
Transforms samples in ``X`` into ``X_embedded``, samples inside a new
embedding space such that: ``X_embedded = X.dot(L.T)``, where ``L`` is
the learned linear transformation (See :class:`MahalanobisMixin`).
Parameters
----------
X : `numpy.ndarray`, shape=(n_samples, n_features)
The data points to embed.
Returns
-------
X_embedded : `numpy.ndarray`, shape=(n_samples, num_dims)
The embedded data points.
"""
X_checked = check_input(X, type_of_inputs='classic', estimator=self,
preprocessor=self.preprocessor_,
accept_sparse=True)
return X_checked.dot(self.transformer_.T) | python | def transform(self, X):
X_checked = check_input(X, type_of_inputs='classic', estimator=self,
preprocessor=self.preprocessor_,
accept_sparse=True)
return X_checked.dot(self.transformer_.T) | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"X_checked",
"=",
"check_input",
"(",
"X",
",",
"type_of_inputs",
"=",
"'classic'",
",",
"estimator",
"=",
"self",
",",
"preprocessor",
"=",
"self",
".",
"preprocessor_",
",",
"accept_sparse",
"=",
"True... | Embeds data points in the learned linear embedding space.
Transforms samples in ``X`` into ``X_embedded``, samples inside a new
embedding space such that: ``X_embedded = X.dot(L.T)``, where ``L`` is
the learned linear transformation (See :class:`MahalanobisMixin`).
Parameters
----------
X : `numpy.ndarray`, shape=(n_samples, n_features)
The data points to embed.
Returns
-------
X_embedded : `numpy.ndarray`, shape=(n_samples, num_dims)
The embedded data points. | [
"Embeds",
"data",
"points",
"in",
"the",
"learned",
"linear",
"embedding",
"space",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/base_metric.py#L221-L241 |
226,893 | metric-learn/metric-learn | metric_learn/base_metric.py | _PairsClassifierMixin.decision_function | def decision_function(self, pairs):
"""Returns the decision function used to classify the pairs.
Returns the opposite of the learned metric value between samples in every
pair, to be consistent with scikit-learn conventions. Hence it should
ideally be low for dissimilar samples and high for similar samples.
This is the decision function that is used to classify pairs as similar
(+1), or dissimilar (-1).
Parameters
----------
pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
3D Array of pairs to predict, with each row corresponding to two
points, or 2D array of indices of pairs if the metric learner uses a
preprocessor.
Returns
-------
y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
The predicted decision function value for each pair.
"""
pairs = check_input(pairs, type_of_inputs='tuples',
preprocessor=self.preprocessor_,
estimator=self, tuple_size=self._tuple_size)
return - self.score_pairs(pairs) | python | def decision_function(self, pairs):
pairs = check_input(pairs, type_of_inputs='tuples',
preprocessor=self.preprocessor_,
estimator=self, tuple_size=self._tuple_size)
return - self.score_pairs(pairs) | [
"def",
"decision_function",
"(",
"self",
",",
"pairs",
")",
":",
"pairs",
"=",
"check_input",
"(",
"pairs",
",",
"type_of_inputs",
"=",
"'tuples'",
",",
"preprocessor",
"=",
"self",
".",
"preprocessor_",
",",
"estimator",
"=",
"self",
",",
"tuple_size",
"=",... | Returns the decision function used to classify the pairs.
Returns the opposite of the learned metric value between samples in every
pair, to be consistent with scikit-learn conventions. Hence it should
ideally be low for dissimilar samples and high for similar samples.
This is the decision function that is used to classify pairs as similar
(+1), or dissimilar (-1).
Parameters
----------
pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
3D Array of pairs to predict, with each row corresponding to two
points, or 2D array of indices of pairs if the metric learner uses a
preprocessor.
Returns
-------
y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
The predicted decision function value for each pair. | [
"Returns",
"the",
"decision",
"function",
"used",
"to",
"classify",
"the",
"pairs",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/base_metric.py#L331-L355 |
226,894 | metric-learn/metric-learn | metric_learn/base_metric.py | _PairsClassifierMixin.calibrate_threshold | def calibrate_threshold(self, pairs_valid, y_valid, strategy='accuracy',
min_rate=None, beta=1.):
"""Decision threshold calibration for pairwise binary classification
Method that calibrates the decision threshold (cutoff point) of the metric
learner. This threshold will then be used when calling the method
`predict`. The methods for picking cutoff points make use of traditional
binary classification evaluation statistics such as the true positive and
true negative rates and F-scores. The threshold will be found to maximize
the chosen score on the validation set ``(pairs_valid, y_valid)``.
See more in the :ref:`User Guide <calibration>`.
Parameters
----------
strategy : str, optional (default='accuracy')
The strategy to use for choosing the cutoff threshold.
'accuracy'
Selects a decision threshold that maximizes the accuracy.
'f_beta'
Selects a decision threshold that maximizes the f_beta score,
with beta given by the parameter `beta`.
'max_tpr'
Selects a decision threshold that yields the highest true positive
rate with true negative rate at least equal to the value of the
parameter `min_rate`.
'max_tnr'
Selects a decision threshold that yields the highest true negative
rate with true positive rate at least equal to the value of the
parameter `min_rate`.
beta : float in [0, 1], optional (default=None)
Beta value to be used in case strategy == 'f_beta'.
min_rate : float in [0, 1] or None, (default=None)
In case strategy is 'max_tpr' or 'max_tnr' this parameter must be set
to specify the minimal value for the true negative rate or true positive
rate respectively that needs to be achieved.
pairs_valid : array-like, shape=(n_pairs_valid, 2, n_features)
The validation set of pairs to use to set the threshold.
y_valid : array-like, shape=(n_pairs_valid,)
The labels of the pairs of the validation set to use to set the
threshold. They must be +1 for positive pairs and -1 for negative pairs.
References
----------
.. [1] Receiver-operating characteristic (ROC) plots: a fundamental
evaluation tool in clinical medicine, MH Zweig, G Campbell -
Clinical chemistry, 1993
.. [2] most of the code of this function is from scikit-learn's PR #10117
See Also
--------
sklearn.calibration : scikit-learn's module for calibrating classifiers
"""
self._validate_calibration_params(strategy, min_rate, beta)
pairs_valid, y_valid = self._prepare_inputs(pairs_valid, y_valid,
type_of_inputs='tuples')
n_samples = pairs_valid.shape[0]
if strategy == 'accuracy':
scores = self.decision_function(pairs_valid)
scores_sorted_idces = np.argsort(scores)[::-1]
scores_sorted = scores[scores_sorted_idces]
# true labels ordered by decision_function value: (higher first)
y_ordered = y_valid[scores_sorted_idces]
# we need to add a threshold that will reject all points
scores_sorted = np.concatenate([[scores_sorted[0] + 1], scores_sorted])
# finds the threshold that maximizes the accuracy:
cum_tp = stable_cumsum(y_ordered == 1) # cumulative number of true
# positives
# we need to add the point where all samples are rejected:
cum_tp = np.concatenate([[0.], cum_tp])
cum_tn_inverted = stable_cumsum(y_ordered[::-1] == -1)
cum_tn = np.concatenate([[0.], cum_tn_inverted])[::-1]
cum_accuracy = (cum_tp + cum_tn) / n_samples
imax = np.argmax(cum_accuracy)
# we set the threshold to the lowest accepted score
# note: we are working with negative distances but we want the threshold
# to be with respect to the actual distances so we take minus sign
self.threshold_ = - scores_sorted[imax]
# note: if the best is to reject all points it's already one of the
# thresholds (scores_sorted[0])
return self
if strategy == 'f_beta':
precision, recall, thresholds = precision_recall_curve(
y_valid, self.decision_function(pairs_valid), pos_label=1)
# here the thresholds are decreasing
# We ignore the warnings here, in the same taste as
# https://github.com/scikit-learn/scikit-learn/blob/62d205980446a1abc1065
# f4332fd74eee57fcf73/sklearn/metrics/classification.py#L1284
with np.errstate(divide='ignore', invalid='ignore'):
f_beta = ((1 + beta**2) * (precision * recall) /
(beta**2 * precision + recall))
# We need to set nans to zero otherwise they will be considered higher
# than the others (also discussed in https://github.com/scikit-learn/
# scikit-learn/pull/10117/files#r262115773)
f_beta[np.isnan(f_beta)] = 0.
imax = np.argmax(f_beta)
# we set the threshold to the lowest accepted score
# note: we are working with negative distances but we want the threshold
# to be with respect to the actual distances so we take minus sign
self.threshold_ = - thresholds[imax]
# Note: we don't need to deal with rejecting all points (i.e. threshold =
# max_scores + 1), since this can never happen to be optimal
# (see a more detailed discussion in test_calibrate_threshold_extreme)
return self
fpr, tpr, thresholds = roc_curve(y_valid,
self.decision_function(pairs_valid),
pos_label=1)
# here the thresholds are decreasing
fpr, tpr, thresholds = fpr, tpr, thresholds
if strategy in ['max_tpr', 'max_tnr']:
if strategy == 'max_tpr':
indices = np.where(1 - fpr >= min_rate)[0]
imax = np.argmax(tpr[indices])
if strategy == 'max_tnr':
indices = np.where(tpr >= min_rate)[0]
imax = np.argmax(1 - fpr[indices])
imax_valid = indices[imax]
# note: we are working with negative distances but we want the threshold
# to be with respect to the actual distances so we take minus sign
if indices[imax] == len(thresholds): # we want to accept everything
self.threshold_ = - (thresholds[imax_valid] - 1)
else:
# thanks to roc_curve, the first point will always be max_scores
# + 1, see: https://github.com/scikit-learn/scikit-learn/pull/13523
self.threshold_ = - thresholds[imax_valid]
return self | python | def calibrate_threshold(self, pairs_valid, y_valid, strategy='accuracy',
min_rate=None, beta=1.):
self._validate_calibration_params(strategy, min_rate, beta)
pairs_valid, y_valid = self._prepare_inputs(pairs_valid, y_valid,
type_of_inputs='tuples')
n_samples = pairs_valid.shape[0]
if strategy == 'accuracy':
scores = self.decision_function(pairs_valid)
scores_sorted_idces = np.argsort(scores)[::-1]
scores_sorted = scores[scores_sorted_idces]
# true labels ordered by decision_function value: (higher first)
y_ordered = y_valid[scores_sorted_idces]
# we need to add a threshold that will reject all points
scores_sorted = np.concatenate([[scores_sorted[0] + 1], scores_sorted])
# finds the threshold that maximizes the accuracy:
cum_tp = stable_cumsum(y_ordered == 1) # cumulative number of true
# positives
# we need to add the point where all samples are rejected:
cum_tp = np.concatenate([[0.], cum_tp])
cum_tn_inverted = stable_cumsum(y_ordered[::-1] == -1)
cum_tn = np.concatenate([[0.], cum_tn_inverted])[::-1]
cum_accuracy = (cum_tp + cum_tn) / n_samples
imax = np.argmax(cum_accuracy)
# we set the threshold to the lowest accepted score
# note: we are working with negative distances but we want the threshold
# to be with respect to the actual distances so we take minus sign
self.threshold_ = - scores_sorted[imax]
# note: if the best is to reject all points it's already one of the
# thresholds (scores_sorted[0])
return self
if strategy == 'f_beta':
precision, recall, thresholds = precision_recall_curve(
y_valid, self.decision_function(pairs_valid), pos_label=1)
# here the thresholds are decreasing
# We ignore the warnings here, in the same taste as
# https://github.com/scikit-learn/scikit-learn/blob/62d205980446a1abc1065
# f4332fd74eee57fcf73/sklearn/metrics/classification.py#L1284
with np.errstate(divide='ignore', invalid='ignore'):
f_beta = ((1 + beta**2) * (precision * recall) /
(beta**2 * precision + recall))
# We need to set nans to zero otherwise they will be considered higher
# than the others (also discussed in https://github.com/scikit-learn/
# scikit-learn/pull/10117/files#r262115773)
f_beta[np.isnan(f_beta)] = 0.
imax = np.argmax(f_beta)
# we set the threshold to the lowest accepted score
# note: we are working with negative distances but we want the threshold
# to be with respect to the actual distances so we take minus sign
self.threshold_ = - thresholds[imax]
# Note: we don't need to deal with rejecting all points (i.e. threshold =
# max_scores + 1), since this can never happen to be optimal
# (see a more detailed discussion in test_calibrate_threshold_extreme)
return self
fpr, tpr, thresholds = roc_curve(y_valid,
self.decision_function(pairs_valid),
pos_label=1)
# here the thresholds are decreasing
fpr, tpr, thresholds = fpr, tpr, thresholds
if strategy in ['max_tpr', 'max_tnr']:
if strategy == 'max_tpr':
indices = np.where(1 - fpr >= min_rate)[0]
imax = np.argmax(tpr[indices])
if strategy == 'max_tnr':
indices = np.where(tpr >= min_rate)[0]
imax = np.argmax(1 - fpr[indices])
imax_valid = indices[imax]
# note: we are working with negative distances but we want the threshold
# to be with respect to the actual distances so we take minus sign
if indices[imax] == len(thresholds): # we want to accept everything
self.threshold_ = - (thresholds[imax_valid] - 1)
else:
# thanks to roc_curve, the first point will always be max_scores
# + 1, see: https://github.com/scikit-learn/scikit-learn/pull/13523
self.threshold_ = - thresholds[imax_valid]
return self | [
"def",
"calibrate_threshold",
"(",
"self",
",",
"pairs_valid",
",",
"y_valid",
",",
"strategy",
"=",
"'accuracy'",
",",
"min_rate",
"=",
"None",
",",
"beta",
"=",
"1.",
")",
":",
"self",
".",
"_validate_calibration_params",
"(",
"strategy",
",",
"min_rate",
... | Decision threshold calibration for pairwise binary classification
Method that calibrates the decision threshold (cutoff point) of the metric
learner. This threshold will then be used when calling the method
`predict`. The methods for picking cutoff points make use of traditional
binary classification evaluation statistics such as the true positive and
true negative rates and F-scores. The threshold will be found to maximize
the chosen score on the validation set ``(pairs_valid, y_valid)``.
See more in the :ref:`User Guide <calibration>`.
Parameters
----------
strategy : str, optional (default='accuracy')
The strategy to use for choosing the cutoff threshold.
'accuracy'
Selects a decision threshold that maximizes the accuracy.
'f_beta'
Selects a decision threshold that maximizes the f_beta score,
with beta given by the parameter `beta`.
'max_tpr'
Selects a decision threshold that yields the highest true positive
rate with true negative rate at least equal to the value of the
parameter `min_rate`.
'max_tnr'
Selects a decision threshold that yields the highest true negative
rate with true positive rate at least equal to the value of the
parameter `min_rate`.
beta : float in [0, 1], optional (default=None)
Beta value to be used in case strategy == 'f_beta'.
min_rate : float in [0, 1] or None, (default=None)
In case strategy is 'max_tpr' or 'max_tnr' this parameter must be set
to specify the minimal value for the true negative rate or true positive
rate respectively that needs to be achieved.
pairs_valid : array-like, shape=(n_pairs_valid, 2, n_features)
The validation set of pairs to use to set the threshold.
y_valid : array-like, shape=(n_pairs_valid,)
The labels of the pairs of the validation set to use to set the
threshold. They must be +1 for positive pairs and -1 for negative pairs.
References
----------
.. [1] Receiver-operating characteristic (ROC) plots: a fundamental
evaluation tool in clinical medicine, MH Zweig, G Campbell -
Clinical chemistry, 1993
.. [2] most of the code of this function is from scikit-learn's PR #10117
See Also
--------
sklearn.calibration : scikit-learn's module for calibrating classifiers | [
"Decision",
"threshold",
"calibration",
"for",
"pairwise",
"binary",
"classification"
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/base_metric.py#L405-L546 |
226,895 | metric-learn/metric-learn | metric_learn/base_metric.py | _PairsClassifierMixin._validate_calibration_params | def _validate_calibration_params(strategy='accuracy', min_rate=None,
beta=1.):
"""Ensure that calibration parameters have allowed values"""
if strategy not in ('accuracy', 'f_beta', 'max_tpr',
'max_tnr'):
raise ValueError('Strategy can either be "accuracy", "f_beta" or '
'"max_tpr" or "max_tnr". Got "{}" instead.'
.format(strategy))
if strategy == 'max_tpr' or strategy == 'max_tnr':
if (min_rate is None or not isinstance(min_rate, (int, float)) or
not min_rate >= 0 or not min_rate <= 1):
raise ValueError('Parameter min_rate must be a number in'
'[0, 1]. '
'Got {} instead.'.format(min_rate))
if strategy == 'f_beta':
if beta is None or not isinstance(beta, (int, float)):
raise ValueError('Parameter beta must be a real number. '
'Got {} instead.'.format(type(beta))) | python | def _validate_calibration_params(strategy='accuracy', min_rate=None,
beta=1.):
if strategy not in ('accuracy', 'f_beta', 'max_tpr',
'max_tnr'):
raise ValueError('Strategy can either be "accuracy", "f_beta" or '
'"max_tpr" or "max_tnr". Got "{}" instead.'
.format(strategy))
if strategy == 'max_tpr' or strategy == 'max_tnr':
if (min_rate is None or not isinstance(min_rate, (int, float)) or
not min_rate >= 0 or not min_rate <= 1):
raise ValueError('Parameter min_rate must be a number in'
'[0, 1]. '
'Got {} instead.'.format(min_rate))
if strategy == 'f_beta':
if beta is None or not isinstance(beta, (int, float)):
raise ValueError('Parameter beta must be a real number. '
'Got {} instead.'.format(type(beta))) | [
"def",
"_validate_calibration_params",
"(",
"strategy",
"=",
"'accuracy'",
",",
"min_rate",
"=",
"None",
",",
"beta",
"=",
"1.",
")",
":",
"if",
"strategy",
"not",
"in",
"(",
"'accuracy'",
",",
"'f_beta'",
",",
"'max_tpr'",
",",
"'max_tnr'",
")",
":",
"rai... | Ensure that calibration parameters have allowed values | [
"Ensure",
"that",
"calibration",
"parameters",
"have",
"allowed",
"values"
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/base_metric.py#L549-L566 |
226,896 | metric-learn/metric-learn | metric_learn/base_metric.py | _QuadrupletsClassifierMixin.predict | def predict(self, quadruplets):
"""Predicts the ordering between sample distances in input quadruplets.
For each quadruplet, returns 1 if the quadruplet is in the right order (
first pair is more similar than second pair), and -1 if not.
Parameters
----------
quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or
(n_quadruplets, 4)
3D Array of quadruplets to predict, with each row corresponding to four
points, or 2D array of indices of quadruplets if the metric learner
uses a preprocessor.
Returns
-------
prediction : `numpy.ndarray` of floats, shape=(n_constraints,)
Predictions of the ordering of pairs, for each quadruplet.
"""
check_is_fitted(self, 'transformer_')
quadruplets = check_input(quadruplets, type_of_inputs='tuples',
preprocessor=self.preprocessor_,
estimator=self, tuple_size=self._tuple_size)
return np.sign(self.decision_function(quadruplets)) | python | def predict(self, quadruplets):
check_is_fitted(self, 'transformer_')
quadruplets = check_input(quadruplets, type_of_inputs='tuples',
preprocessor=self.preprocessor_,
estimator=self, tuple_size=self._tuple_size)
return np.sign(self.decision_function(quadruplets)) | [
"def",
"predict",
"(",
"self",
",",
"quadruplets",
")",
":",
"check_is_fitted",
"(",
"self",
",",
"'transformer_'",
")",
"quadruplets",
"=",
"check_input",
"(",
"quadruplets",
",",
"type_of_inputs",
"=",
"'tuples'",
",",
"preprocessor",
"=",
"self",
".",
"prep... | Predicts the ordering between sample distances in input quadruplets.
For each quadruplet, returns 1 if the quadruplet is in the right order (
first pair is more similar than second pair), and -1 if not.
Parameters
----------
quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or
(n_quadruplets, 4)
3D Array of quadruplets to predict, with each row corresponding to four
points, or 2D array of indices of quadruplets if the metric learner
uses a preprocessor.
Returns
-------
prediction : `numpy.ndarray` of floats, shape=(n_constraints,)
Predictions of the ordering of pairs, for each quadruplet. | [
"Predicts",
"the",
"ordering",
"between",
"sample",
"distances",
"in",
"input",
"quadruplets",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/base_metric.py#L573-L596 |
226,897 | metric-learn/metric-learn | metric_learn/rca.py | RCA.fit | def fit(self, X, chunks):
"""Learn the RCA model.
Parameters
----------
data : (n x d) data matrix
Each row corresponds to a single instance
chunks : (n,) array of ints
When ``chunks[i] == -1``, point i doesn't belong to any chunklet.
When ``chunks[i] == j``, point i belongs to chunklet j.
"""
X = self._prepare_inputs(X, ensure_min_samples=2)
# PCA projection to remove noise and redundant information.
if self.pca_comps is not None:
pca = decomposition.PCA(n_components=self.pca_comps)
X_t = pca.fit_transform(X)
M_pca = pca.components_
else:
X_t = X - X.mean(axis=0)
M_pca = None
chunks = np.asanyarray(chunks, dtype=int)
chunk_mask, chunked_data = _chunk_mean_centering(X_t, chunks)
inner_cov = np.atleast_2d(np.cov(chunked_data, rowvar=0, bias=1))
dim = self._check_dimension(np.linalg.matrix_rank(inner_cov), X_t)
# Fisher Linear Discriminant projection
if dim < X_t.shape[1]:
total_cov = np.cov(X_t[chunk_mask], rowvar=0)
tmp = np.linalg.lstsq(total_cov, inner_cov)[0]
vals, vecs = np.linalg.eig(tmp)
inds = np.argsort(vals)[:dim]
A = vecs[:, inds]
inner_cov = np.atleast_2d(A.T.dot(inner_cov).dot(A))
self.transformer_ = _inv_sqrtm(inner_cov).dot(A.T)
else:
self.transformer_ = _inv_sqrtm(inner_cov).T
if M_pca is not None:
self.transformer_ = np.atleast_2d(self.transformer_.dot(M_pca))
return self | python | def fit(self, X, chunks):
X = self._prepare_inputs(X, ensure_min_samples=2)
# PCA projection to remove noise and redundant information.
if self.pca_comps is not None:
pca = decomposition.PCA(n_components=self.pca_comps)
X_t = pca.fit_transform(X)
M_pca = pca.components_
else:
X_t = X - X.mean(axis=0)
M_pca = None
chunks = np.asanyarray(chunks, dtype=int)
chunk_mask, chunked_data = _chunk_mean_centering(X_t, chunks)
inner_cov = np.atleast_2d(np.cov(chunked_data, rowvar=0, bias=1))
dim = self._check_dimension(np.linalg.matrix_rank(inner_cov), X_t)
# Fisher Linear Discriminant projection
if dim < X_t.shape[1]:
total_cov = np.cov(X_t[chunk_mask], rowvar=0)
tmp = np.linalg.lstsq(total_cov, inner_cov)[0]
vals, vecs = np.linalg.eig(tmp)
inds = np.argsort(vals)[:dim]
A = vecs[:, inds]
inner_cov = np.atleast_2d(A.T.dot(inner_cov).dot(A))
self.transformer_ = _inv_sqrtm(inner_cov).dot(A.T)
else:
self.transformer_ = _inv_sqrtm(inner_cov).T
if M_pca is not None:
self.transformer_ = np.atleast_2d(self.transformer_.dot(M_pca))
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"chunks",
")",
":",
"X",
"=",
"self",
".",
"_prepare_inputs",
"(",
"X",
",",
"ensure_min_samples",
"=",
"2",
")",
"# PCA projection to remove noise and redundant information.",
"if",
"self",
".",
"pca_comps",
"is",
"no... | Learn the RCA model.
Parameters
----------
data : (n x d) data matrix
Each row corresponds to a single instance
chunks : (n,) array of ints
When ``chunks[i] == -1``, point i doesn't belong to any chunklet.
When ``chunks[i] == j``, point i belongs to chunklet j. | [
"Learn",
"the",
"RCA",
"model",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/rca.py#L90-L133 |
226,898 | metric-learn/metric-learn | metric_learn/rca.py | RCA_Supervised.fit | def fit(self, X, y, random_state=np.random):
"""Create constraints from labels and learn the RCA model.
Needs num_constraints specified in constructor.
Parameters
----------
X : (n x d) data matrix
each row corresponds to a single instance
y : (n) data labels
random_state : a random.seed object to fix the random_state if needed.
"""
X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
chunks = Constraints(y).chunks(num_chunks=self.num_chunks,
chunk_size=self.chunk_size,
random_state=random_state)
return RCA.fit(self, X, chunks) | python | def fit(self, X, y, random_state=np.random):
X, y = self._prepare_inputs(X, y, ensure_min_samples=2)
chunks = Constraints(y).chunks(num_chunks=self.num_chunks,
chunk_size=self.chunk_size,
random_state=random_state)
return RCA.fit(self, X, chunks) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"random_state",
"=",
"np",
".",
"random",
")",
":",
"X",
",",
"y",
"=",
"self",
".",
"_prepare_inputs",
"(",
"X",
",",
"y",
",",
"ensure_min_samples",
"=",
"2",
")",
"chunks",
"=",
"Constraints",
... | Create constraints from labels and learn the RCA model.
Needs num_constraints specified in constructor.
Parameters
----------
X : (n x d) data matrix
each row corresponds to a single instance
y : (n) data labels
random_state : a random.seed object to fix the random_state if needed. | [
"Create",
"constraints",
"from",
"labels",
"and",
"learn",
"the",
"RCA",
"model",
".",
"Needs",
"num_constraints",
"specified",
"in",
"constructor",
"."
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/rca.py#L174-L189 |
226,899 | metric-learn/metric-learn | metric_learn/_util.py | make_name | def make_name(estimator):
"""Helper function that returns the name of estimator or the given string
if a string is given
"""
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = None
return estimator_name | python | def make_name(estimator):
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = None
return estimator_name | [
"def",
"make_name",
"(",
"estimator",
")",
":",
"if",
"estimator",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"estimator",
",",
"six",
".",
"string_types",
")",
":",
"estimator_name",
"=",
"estimator",
"else",
":",
"estimator_name",
"=",
"estimator",... | Helper function that returns the name of estimator or the given string
if a string is given | [
"Helper",
"function",
"that",
"returns",
"the",
"name",
"of",
"estimator",
"or",
"the",
"given",
"string",
"if",
"a",
"string",
"is",
"given"
] | d945df1342c69012608bb70b92520392a0853de6 | https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/_util.py#L275-L286 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.