repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
vpelletier/python-libusb1 | usb1/__init__.py | USBPoller.unregister | def unregister(self, fd):
"""
Unregister an USB-unrelated fd from poller.
Convenience method.
"""
if fd in self.__fd_set:
raise ValueError(
'This fd is a special USB event fd, it must stay registered.'
)
self.__poller.unregister(fd) | python | def unregister(self, fd):
"""
Unregister an USB-unrelated fd from poller.
Convenience method.
"""
if fd in self.__fd_set:
raise ValueError(
'This fd is a special USB event fd, it must stay registered.'
)
self.__poller.unregister(fd) | [
"def",
"unregister",
"(",
"self",
",",
"fd",
")",
":",
"if",
"fd",
"in",
"self",
".",
"__fd_set",
":",
"raise",
"ValueError",
"(",
"'This fd is a special USB event fd, it must stay registered.'",
")",
"self",
".",
"__poller",
".",
"unregister",
"(",
"fd",
")"
] | Unregister an USB-unrelated fd from poller.
Convenience method. | [
"Unregister",
"an",
"USB",
"-",
"unrelated",
"fd",
"from",
"poller",
".",
"Convenience",
"method",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L1118-L1127 | train | 204,700 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDeviceHandle.close | def close(self):
"""
Close this handle. If not called explicitely, will be called by
destructor.
This method cancels any in-flight transfer when it is called. As
cancellation is not immediate, this method needs to let libusb handle
events until transfers are actually cancelled.
In multi-threaded programs, this can lead to stalls. To avoid this,
do not close nor let GC collect a USBDeviceHandle which has in-flight
transfers.
"""
handle = self.__handle
if handle is None:
return
# Build a strong set from weak self.__transfer_set so we can doom
# and close all contained transfers.
# Because of backward compatibility, self.__transfer_set might be a
# wrapper around WeakKeyDictionary. As it might be modified by gc,
# we must pop until there is not key left instead of iterating over
# it.
weak_transfer_set = self.__transfer_set
transfer_set = self.__set()
while True:
try:
transfer = weak_transfer_set.pop()
except self.__KeyError:
break
transfer_set.add(transfer)
transfer.doom()
inflight = self.__inflight
for transfer in inflight:
try:
transfer.cancel()
except (self.__USBErrorNotFound, self.__USBErrorNoDevice):
pass
while inflight:
try:
self.__context.handleEvents()
except self.__USBErrorInterrupted:
pass
for transfer in transfer_set:
transfer.close()
self.__libusb_close(handle)
self.__handle = None | python | def close(self):
"""
Close this handle. If not called explicitely, will be called by
destructor.
This method cancels any in-flight transfer when it is called. As
cancellation is not immediate, this method needs to let libusb handle
events until transfers are actually cancelled.
In multi-threaded programs, this can lead to stalls. To avoid this,
do not close nor let GC collect a USBDeviceHandle which has in-flight
transfers.
"""
handle = self.__handle
if handle is None:
return
# Build a strong set from weak self.__transfer_set so we can doom
# and close all contained transfers.
# Because of backward compatibility, self.__transfer_set might be a
# wrapper around WeakKeyDictionary. As it might be modified by gc,
# we must pop until there is not key left instead of iterating over
# it.
weak_transfer_set = self.__transfer_set
transfer_set = self.__set()
while True:
try:
transfer = weak_transfer_set.pop()
except self.__KeyError:
break
transfer_set.add(transfer)
transfer.doom()
inflight = self.__inflight
for transfer in inflight:
try:
transfer.cancel()
except (self.__USBErrorNotFound, self.__USBErrorNoDevice):
pass
while inflight:
try:
self.__context.handleEvents()
except self.__USBErrorInterrupted:
pass
for transfer in transfer_set:
transfer.close()
self.__libusb_close(handle)
self.__handle = None | [
"def",
"close",
"(",
"self",
")",
":",
"handle",
"=",
"self",
".",
"__handle",
"if",
"handle",
"is",
"None",
":",
"return",
"# Build a strong set from weak self.__transfer_set so we can doom",
"# and close all contained transfers.",
"# Because of backward compatibility, self.__... | Close this handle. If not called explicitely, will be called by
destructor.
This method cancels any in-flight transfer when it is called. As
cancellation is not immediate, this method needs to let libusb handle
events until transfers are actually cancelled.
In multi-threaded programs, this can lead to stalls. To avoid this,
do not close nor let GC collect a USBDeviceHandle which has in-flight
transfers. | [
"Close",
"this",
"handle",
".",
"If",
"not",
"called",
"explicitely",
"will",
"be",
"called",
"by",
"destructor",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L1194-L1238 | train | 204,701 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDeviceHandle.getConfiguration | def getConfiguration(self):
"""
Get the current configuration number for this device.
"""
configuration = c_int()
mayRaiseUSBError(libusb1.libusb_get_configuration(
self.__handle, byref(configuration),
))
return configuration.value | python | def getConfiguration(self):
"""
Get the current configuration number for this device.
"""
configuration = c_int()
mayRaiseUSBError(libusb1.libusb_get_configuration(
self.__handle, byref(configuration),
))
return configuration.value | [
"def",
"getConfiguration",
"(",
"self",
")",
":",
"configuration",
"=",
"c_int",
"(",
")",
"mayRaiseUSBError",
"(",
"libusb1",
".",
"libusb_get_configuration",
"(",
"self",
".",
"__handle",
",",
"byref",
"(",
"configuration",
")",
",",
")",
")",
"return",
"c... | Get the current configuration number for this device. | [
"Get",
"the",
"current",
"configuration",
"number",
"for",
"this",
"device",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L1247-L1255 | train | 204,702 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDeviceHandle.kernelDriverActive | def kernelDriverActive(self, interface):
"""
Tell whether a kernel driver is active on given interface number.
"""
result = libusb1.libusb_kernel_driver_active(self.__handle, interface)
if result == 0:
return False
elif result == 1:
return True
raiseUSBError(result) | python | def kernelDriverActive(self, interface):
"""
Tell whether a kernel driver is active on given interface number.
"""
result = libusb1.libusb_kernel_driver_active(self.__handle, interface)
if result == 0:
return False
elif result == 1:
return True
raiseUSBError(result) | [
"def",
"kernelDriverActive",
"(",
"self",
",",
"interface",
")",
":",
"result",
"=",
"libusb1",
".",
"libusb_kernel_driver_active",
"(",
"self",
".",
"__handle",
",",
"interface",
")",
"if",
"result",
"==",
"0",
":",
"return",
"False",
"elif",
"result",
"=="... | Tell whether a kernel driver is active on given interface number. | [
"Tell",
"whether",
"a",
"kernel",
"driver",
"is",
"active",
"on",
"given",
"interface",
"number",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L1336-L1345 | train | 204,703 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDeviceHandle.getStringDescriptor | def getStringDescriptor(self, descriptor, lang_id, errors='strict'):
"""
Fetch description string for given descriptor and in given language.
Use getSupportedLanguageList to know which languages are available.
Return value is a unicode string.
Return None if there is no such descriptor on device.
"""
if descriptor == 0:
return None
descriptor_string = bytearray(STRING_LENGTH)
try:
received = mayRaiseUSBError(libusb1.libusb_get_string_descriptor(
self.__handle, descriptor, lang_id,
create_binary_buffer(descriptor_string)[0],
STRING_LENGTH,
))
# pylint: disable=undefined-variable
except USBErrorNotFound:
# pylint: enable=undefined-variable
return None
if received < 2 or descriptor_string[1] != DT_STRING:
raise ValueError('Invalid string descriptor')
return descriptor_string[2:min(
received,
descriptor_string[0],
)].decode('UTF-16-LE', errors=errors) | python | def getStringDescriptor(self, descriptor, lang_id, errors='strict'):
"""
Fetch description string for given descriptor and in given language.
Use getSupportedLanguageList to know which languages are available.
Return value is a unicode string.
Return None if there is no such descriptor on device.
"""
if descriptor == 0:
return None
descriptor_string = bytearray(STRING_LENGTH)
try:
received = mayRaiseUSBError(libusb1.libusb_get_string_descriptor(
self.__handle, descriptor, lang_id,
create_binary_buffer(descriptor_string)[0],
STRING_LENGTH,
))
# pylint: disable=undefined-variable
except USBErrorNotFound:
# pylint: enable=undefined-variable
return None
if received < 2 or descriptor_string[1] != DT_STRING:
raise ValueError('Invalid string descriptor')
return descriptor_string[2:min(
received,
descriptor_string[0],
)].decode('UTF-16-LE', errors=errors) | [
"def",
"getStringDescriptor",
"(",
"self",
",",
"descriptor",
",",
"lang_id",
",",
"errors",
"=",
"'strict'",
")",
":",
"if",
"descriptor",
"==",
"0",
":",
"return",
"None",
"descriptor_string",
"=",
"bytearray",
"(",
"STRING_LENGTH",
")",
"try",
":",
"recei... | Fetch description string for given descriptor and in given language.
Use getSupportedLanguageList to know which languages are available.
Return value is a unicode string.
Return None if there is no such descriptor on device. | [
"Fetch",
"description",
"string",
"for",
"given",
"descriptor",
"and",
"in",
"given",
"language",
".",
"Use",
"getSupportedLanguageList",
"to",
"know",
"which",
"languages",
"are",
"available",
".",
"Return",
"value",
"is",
"a",
"unicode",
"string",
".",
"Return... | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L1400-L1425 | train | 204,704 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDeviceHandle.getASCIIStringDescriptor | def getASCIIStringDescriptor(self, descriptor, errors='strict'):
"""
Fetch description string for given descriptor in first available
language.
Return value is a unicode string.
Return None if there is no such descriptor on device.
"""
if descriptor == 0:
return None
descriptor_string = bytearray(STRING_LENGTH)
try:
received = mayRaiseUSBError(libusb1.libusb_get_string_descriptor_ascii(
self.__handle, descriptor,
create_binary_buffer(descriptor_string)[0],
STRING_LENGTH,
))
# pylint: disable=undefined-variable
except USBErrorNotFound:
# pylint: enable=undefined-variable
return None
return descriptor_string[:received].decode('ASCII', errors=errors) | python | def getASCIIStringDescriptor(self, descriptor, errors='strict'):
"""
Fetch description string for given descriptor in first available
language.
Return value is a unicode string.
Return None if there is no such descriptor on device.
"""
if descriptor == 0:
return None
descriptor_string = bytearray(STRING_LENGTH)
try:
received = mayRaiseUSBError(libusb1.libusb_get_string_descriptor_ascii(
self.__handle, descriptor,
create_binary_buffer(descriptor_string)[0],
STRING_LENGTH,
))
# pylint: disable=undefined-variable
except USBErrorNotFound:
# pylint: enable=undefined-variable
return None
return descriptor_string[:received].decode('ASCII', errors=errors) | [
"def",
"getASCIIStringDescriptor",
"(",
"self",
",",
"descriptor",
",",
"errors",
"=",
"'strict'",
")",
":",
"if",
"descriptor",
"==",
"0",
":",
"return",
"None",
"descriptor_string",
"=",
"bytearray",
"(",
"STRING_LENGTH",
")",
"try",
":",
"received",
"=",
... | Fetch description string for given descriptor in first available
language.
Return value is a unicode string.
Return None if there is no such descriptor on device. | [
"Fetch",
"description",
"string",
"for",
"given",
"descriptor",
"in",
"first",
"available",
"language",
".",
"Return",
"value",
"is",
"a",
"unicode",
"string",
".",
"Return",
"None",
"if",
"there",
"is",
"no",
"such",
"descriptor",
"on",
"device",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L1427-L1447 | train | 204,705 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDevice.getPortNumberList | def getPortNumberList(self):
"""
Get the port number of each hub toward device.
"""
port_list = (c_uint8 * PATH_MAX_DEPTH)()
result = libusb1.libusb_get_port_numbers(
self.device_p, port_list, len(port_list))
mayRaiseUSBError(result)
return list(port_list[:result]) | python | def getPortNumberList(self):
"""
Get the port number of each hub toward device.
"""
port_list = (c_uint8 * PATH_MAX_DEPTH)()
result = libusb1.libusb_get_port_numbers(
self.device_p, port_list, len(port_list))
mayRaiseUSBError(result)
return list(port_list[:result]) | [
"def",
"getPortNumberList",
"(",
"self",
")",
":",
"port_list",
"=",
"(",
"c_uint8",
"*",
"PATH_MAX_DEPTH",
")",
"(",
")",
"result",
"=",
"libusb1",
".",
"libusb_get_port_numbers",
"(",
"self",
".",
"device_p",
",",
"port_list",
",",
"len",
"(",
"port_list",... | Get the port number of each hub toward device. | [
"Get",
"the",
"port",
"number",
"of",
"each",
"hub",
"toward",
"device",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L1987-L1995 | train | 204,706 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDevice.getMaxPacketSize | def getMaxPacketSize(self, endpoint):
"""
Get device's max packet size for given endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
"""
result = libusb1.libusb_get_max_packet_size(self.device_p, endpoint)
mayRaiseUSBError(result)
return result | python | def getMaxPacketSize(self, endpoint):
"""
Get device's max packet size for given endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
"""
result = libusb1.libusb_get_max_packet_size(self.device_p, endpoint)
mayRaiseUSBError(result)
return result | [
"def",
"getMaxPacketSize",
"(",
"self",
",",
"endpoint",
")",
":",
"result",
"=",
"libusb1",
".",
"libusb_get_max_packet_size",
"(",
"self",
".",
"device_p",
",",
"endpoint",
")",
"mayRaiseUSBError",
"(",
"result",
")",
"return",
"result"
] | Get device's max packet size for given endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting. | [
"Get",
"device",
"s",
"max",
"packet",
"size",
"for",
"given",
"endpoint",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2036-L2046 | train | 204,707 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDevice.getMaxISOPacketSize | def getMaxISOPacketSize(self, endpoint):
"""
Get the maximum size for a single isochronous packet for given
endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
"""
result = libusb1.libusb_get_max_iso_packet_size(self.device_p, endpoint)
mayRaiseUSBError(result)
return result | python | def getMaxISOPacketSize(self, endpoint):
"""
Get the maximum size for a single isochronous packet for given
endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting.
"""
result = libusb1.libusb_get_max_iso_packet_size(self.device_p, endpoint)
mayRaiseUSBError(result)
return result | [
"def",
"getMaxISOPacketSize",
"(",
"self",
",",
"endpoint",
")",
":",
"result",
"=",
"libusb1",
".",
"libusb_get_max_iso_packet_size",
"(",
"self",
".",
"device_p",
",",
"endpoint",
")",
"mayRaiseUSBError",
"(",
"result",
")",
"return",
"result"
] | Get the maximum size for a single isochronous packet for given
endpoint.
Warning: this function will not always give you the expected result.
See https://libusb.org/ticket/77 . You should instead consult the
endpoint descriptor of current configuration and alternate setting. | [
"Get",
"the",
"maximum",
"size",
"for",
"a",
"single",
"isochronous",
"packet",
"for",
"given",
"endpoint",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2048-L2059 | train | 204,708 |
vpelletier/python-libusb1 | usb1/__init__.py | USBDevice.open | def open(self):
"""
Open device.
Returns an USBDeviceHandle instance.
"""
handle = libusb1.libusb_device_handle_p()
mayRaiseUSBError(libusb1.libusb_open(self.device_p, byref(handle)))
result = USBDeviceHandle(self.__context, handle, self)
self.__close_set.add(result)
return result | python | def open(self):
"""
Open device.
Returns an USBDeviceHandle instance.
"""
handle = libusb1.libusb_device_handle_p()
mayRaiseUSBError(libusb1.libusb_open(self.device_p, byref(handle)))
result = USBDeviceHandle(self.__context, handle, self)
self.__close_set.add(result)
return result | [
"def",
"open",
"(",
"self",
")",
":",
"handle",
"=",
"libusb1",
".",
"libusb_device_handle_p",
"(",
")",
"mayRaiseUSBError",
"(",
"libusb1",
".",
"libusb_open",
"(",
"self",
".",
"device_p",
",",
"byref",
"(",
"handle",
")",
")",
")",
"result",
"=",
"USB... | Open device.
Returns an USBDeviceHandle instance. | [
"Open",
"device",
".",
"Returns",
"an",
"USBDeviceHandle",
"instance",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2153-L2162 | train | 204,709 |
vpelletier/python-libusb1 | usb1/__init__.py | USBContext.getDeviceIterator | def getDeviceIterator(self, skip_on_error=False):
"""
Return an iterator over all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError.
"""
device_p_p = libusb1.libusb_device_p_p()
libusb_device_p = libusb1.libusb_device_p
device_list_len = libusb1.libusb_get_device_list(self.__context_p,
byref(device_p_p))
mayRaiseUSBError(device_list_len)
try:
for device_p in device_p_p[:device_list_len]:
try:
# Instanciate our own libusb_device_p object so we can free
# libusb-provided device list. Is this a bug in ctypes that
# it doesn't copy pointer value (=pointed memory address) ?
# At least, it's not so convenient and forces using such
# weird code.
device = USBDevice(self, libusb_device_p(device_p.contents))
except USBError:
if not skip_on_error:
raise
else:
self.__close_set.add(device)
yield device
finally:
libusb1.libusb_free_device_list(device_p_p, 1) | python | def getDeviceIterator(self, skip_on_error=False):
"""
Return an iterator over all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError.
"""
device_p_p = libusb1.libusb_device_p_p()
libusb_device_p = libusb1.libusb_device_p
device_list_len = libusb1.libusb_get_device_list(self.__context_p,
byref(device_p_p))
mayRaiseUSBError(device_list_len)
try:
for device_p in device_p_p[:device_list_len]:
try:
# Instanciate our own libusb_device_p object so we can free
# libusb-provided device list. Is this a bug in ctypes that
# it doesn't copy pointer value (=pointed memory address) ?
# At least, it's not so convenient and forces using such
# weird code.
device = USBDevice(self, libusb_device_p(device_p.contents))
except USBError:
if not skip_on_error:
raise
else:
self.__close_set.add(device)
yield device
finally:
libusb1.libusb_free_device_list(device_p_p, 1) | [
"def",
"getDeviceIterator",
"(",
"self",
",",
"skip_on_error",
"=",
"False",
")",
":",
"device_p_p",
"=",
"libusb1",
".",
"libusb_device_p_p",
"(",
")",
"libusb_device_p",
"=",
"libusb1",
".",
"libusb_device_p",
"device_list_len",
"=",
"libusb1",
".",
"libusb_get_... | Return an iterator over all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError. | [
"Return",
"an",
"iterator",
"over",
"all",
"USB",
"devices",
"currently",
"plugged",
"in",
"as",
"USBDevice",
"instances",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2308-L2337 | train | 204,710 |
vpelletier/python-libusb1 | usb1/__init__.py | USBContext.getDeviceList | def getDeviceList(self, skip_on_access_error=False, skip_on_error=False):
"""
Return a list of all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError.
skip_on_access_error (bool)
DEPRECATED. Alias for skip_on_error.
"""
return list(
self.getDeviceIterator(
skip_on_error=skip_on_access_error or skip_on_error,
),
) | python | def getDeviceList(self, skip_on_access_error=False, skip_on_error=False):
"""
Return a list of all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError.
skip_on_access_error (bool)
DEPRECATED. Alias for skip_on_error.
"""
return list(
self.getDeviceIterator(
skip_on_error=skip_on_access_error or skip_on_error,
),
) | [
"def",
"getDeviceList",
"(",
"self",
",",
"skip_on_access_error",
"=",
"False",
",",
"skip_on_error",
"=",
"False",
")",
":",
"return",
"list",
"(",
"self",
".",
"getDeviceIterator",
"(",
"skip_on_error",
"=",
"skip_on_access_error",
"or",
"skip_on_error",
",",
... | Return a list of all USB devices currently plugged in, as USBDevice
instances.
skip_on_error (bool)
If True, ignore devices which raise USBError.
skip_on_access_error (bool)
DEPRECATED. Alias for skip_on_error. | [
"Return",
"a",
"list",
"of",
"all",
"USB",
"devices",
"currently",
"plugged",
"in",
"as",
"USBDevice",
"instances",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2339-L2354 | train | 204,711 |
vpelletier/python-libusb1 | usb1/__init__.py | USBContext.getPollFDList | def getPollFDList(self):
"""
Return file descriptors to be used to poll USB events.
You should not have to call this method, unless you are integrating
this class with a polling mechanism.
"""
pollfd_p_p = libusb1.libusb_get_pollfds(self.__context_p)
if not pollfd_p_p:
errno = get_errno()
if errno:
raise OSError(errno)
else:
# Assume not implemented
raise NotImplementedError(
'Your libusb does not seem to implement pollable FDs')
try:
result = []
append = result.append
fd_index = 0
while pollfd_p_p[fd_index]:
append((
pollfd_p_p[fd_index].contents.fd,
pollfd_p_p[fd_index].contents.events,
))
fd_index += 1
finally:
_free(pollfd_p_p)
return result | python | def getPollFDList(self):
"""
Return file descriptors to be used to poll USB events.
You should not have to call this method, unless you are integrating
this class with a polling mechanism.
"""
pollfd_p_p = libusb1.libusb_get_pollfds(self.__context_p)
if not pollfd_p_p:
errno = get_errno()
if errno:
raise OSError(errno)
else:
# Assume not implemented
raise NotImplementedError(
'Your libusb does not seem to implement pollable FDs')
try:
result = []
append = result.append
fd_index = 0
while pollfd_p_p[fd_index]:
append((
pollfd_p_p[fd_index].contents.fd,
pollfd_p_p[fd_index].contents.events,
))
fd_index += 1
finally:
_free(pollfd_p_p)
return result | [
"def",
"getPollFDList",
"(",
"self",
")",
":",
"pollfd_p_p",
"=",
"libusb1",
".",
"libusb_get_pollfds",
"(",
"self",
".",
"__context_p",
")",
"if",
"not",
"pollfd_p_p",
":",
"errno",
"=",
"get_errno",
"(",
")",
"if",
"errno",
":",
"raise",
"OSError",
"(",
... | Return file descriptors to be used to poll USB events.
You should not have to call this method, unless you are integrating
this class with a polling mechanism. | [
"Return",
"file",
"descriptors",
"to",
"be",
"used",
"to",
"poll",
"USB",
"events",
".",
"You",
"should",
"not",
"have",
"to",
"call",
"this",
"method",
"unless",
"you",
"are",
"integrating",
"this",
"class",
"with",
"a",
"polling",
"mechanism",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2394-L2421 | train | 204,712 |
vpelletier/python-libusb1 | usb1/__init__.py | USBContext.handleEventsTimeout | def handleEventsTimeout(self, tv=0):
"""
Handle any pending event.
If tv is 0, will return immediately after handling already-pending
events.
Otherwise, defines the maximum amount of time to wait for events, in
seconds.
"""
if tv is None:
tv = 0
tv_s = int(tv)
real_tv = libusb1.timeval(tv_s, int((tv - tv_s) * 1000000))
mayRaiseUSBError(
libusb1.libusb_handle_events_timeout(
self.__context_p, byref(real_tv),
),
) | python | def handleEventsTimeout(self, tv=0):
"""
Handle any pending event.
If tv is 0, will return immediately after handling already-pending
events.
Otherwise, defines the maximum amount of time to wait for events, in
seconds.
"""
if tv is None:
tv = 0
tv_s = int(tv)
real_tv = libusb1.timeval(tv_s, int((tv - tv_s) * 1000000))
mayRaiseUSBError(
libusb1.libusb_handle_events_timeout(
self.__context_p, byref(real_tv),
),
) | [
"def",
"handleEventsTimeout",
"(",
"self",
",",
"tv",
"=",
"0",
")",
":",
"if",
"tv",
"is",
"None",
":",
"tv",
"=",
"0",
"tv_s",
"=",
"int",
"(",
"tv",
")",
"real_tv",
"=",
"libusb1",
".",
"timeval",
"(",
"tv_s",
",",
"int",
"(",
"(",
"tv",
"-"... | Handle any pending event.
If tv is 0, will return immediately after handling already-pending
events.
Otherwise, defines the maximum amount of time to wait for events, in
seconds. | [
"Handle",
"any",
"pending",
"event",
".",
"If",
"tv",
"is",
"0",
"will",
"return",
"immediately",
"after",
"handling",
"already",
"-",
"pending",
"events",
".",
"Otherwise",
"defines",
"the",
"maximum",
"amount",
"of",
"time",
"to",
"wait",
"for",
"events",
... | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2437-L2453 | train | 204,713 |
vpelletier/python-libusb1 | usb1/__init__.py | USBContext.getNextTimeout | def getNextTimeout(self):
"""
Returns the next internal timeout that libusb needs to handle, in
seconds, or None if no timeout is needed.
You should not have to call this method, unless you are integrating
this class with a polling mechanism.
"""
timeval = libusb1.timeval()
result = libusb1.libusb_get_next_timeout(
self.__context_p, byref(timeval))
if result == 0:
return None
elif result == 1:
return timeval.tv_sec + (timeval.tv_usec * 0.000001)
raiseUSBError(result) | python | def getNextTimeout(self):
"""
Returns the next internal timeout that libusb needs to handle, in
seconds, or None if no timeout is needed.
You should not have to call this method, unless you are integrating
this class with a polling mechanism.
"""
timeval = libusb1.timeval()
result = libusb1.libusb_get_next_timeout(
self.__context_p, byref(timeval))
if result == 0:
return None
elif result == 1:
return timeval.tv_sec + (timeval.tv_usec * 0.000001)
raiseUSBError(result) | [
"def",
"getNextTimeout",
"(",
"self",
")",
":",
"timeval",
"=",
"libusb1",
".",
"timeval",
"(",
")",
"result",
"=",
"libusb1",
".",
"libusb_get_next_timeout",
"(",
"self",
".",
"__context_p",
",",
"byref",
"(",
"timeval",
")",
")",
"if",
"result",
"==",
... | Returns the next internal timeout that libusb needs to handle, in
seconds, or None if no timeout is needed.
You should not have to call this method, unless you are integrating
this class with a polling mechanism. | [
"Returns",
"the",
"next",
"internal",
"timeout",
"that",
"libusb",
"needs",
"to",
"handle",
"in",
"seconds",
"or",
"None",
"if",
"no",
"timeout",
"is",
"needed",
".",
"You",
"should",
"not",
"have",
"to",
"call",
"this",
"method",
"unless",
"you",
"are",
... | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2487-L2501 | train | 204,714 |
vpelletier/python-libusb1 | usb1/__init__.py | USBContext.waitForEvent | def waitForEvent(self, tv=0):
"""
See libusb_wait_for_event doc.
"""
if tv is None:
tv = 0
tv_s = int(tv)
real_tv = libusb1.timeval(tv_s, int((tv - tv_s) * 1000000))
libusb1.libusb_wait_for_event(self.__context_p, byref(real_tv)) | python | def waitForEvent(self, tv=0):
"""
See libusb_wait_for_event doc.
"""
if tv is None:
tv = 0
tv_s = int(tv)
real_tv = libusb1.timeval(tv_s, int((tv - tv_s) * 1000000))
libusb1.libusb_wait_for_event(self.__context_p, byref(real_tv)) | [
"def",
"waitForEvent",
"(",
"self",
",",
"tv",
"=",
"0",
")",
":",
"if",
"tv",
"is",
"None",
":",
"tv",
"=",
"0",
"tv_s",
"=",
"int",
"(",
"tv",
")",
"real_tv",
"=",
"libusb1",
".",
"timeval",
"(",
"tv_s",
",",
"int",
"(",
"(",
"tv",
"-",
"tv... | See libusb_wait_for_event doc. | [
"See",
"libusb_wait_for_event",
"doc",
"."
] | 740c9778e28523e4ec3543415d95f5400ae0fa24 | https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2534-L2542 | train | 204,715 |
kplindegaard/smbus2 | smbus2/smbus2.py | i2c_msg.read | def read(address, length):
"""
Prepares an i2c read transaction.
:param address: Slave address.
:type: address: int
:param length: Number of bytes to read.
:type: length: int
:return: New :py:class:`i2c_msg` instance for read operation.
:rtype: :py:class:`i2c_msg`
"""
arr = create_string_buffer(length)
return i2c_msg(
addr=address, flags=I2C_M_RD, len=length,
buf=arr) | python | def read(address, length):
"""
Prepares an i2c read transaction.
:param address: Slave address.
:type: address: int
:param length: Number of bytes to read.
:type: length: int
:return: New :py:class:`i2c_msg` instance for read operation.
:rtype: :py:class:`i2c_msg`
"""
arr = create_string_buffer(length)
return i2c_msg(
addr=address, flags=I2C_M_RD, len=length,
buf=arr) | [
"def",
"read",
"(",
"address",
",",
"length",
")",
":",
"arr",
"=",
"create_string_buffer",
"(",
"length",
")",
"return",
"i2c_msg",
"(",
"addr",
"=",
"address",
",",
"flags",
"=",
"I2C_M_RD",
",",
"len",
"=",
"length",
",",
"buf",
"=",
"arr",
")"
] | Prepares an i2c read transaction.
:param address: Slave address.
:type: address: int
:param length: Number of bytes to read.
:type: length: int
:return: New :py:class:`i2c_msg` instance for read operation.
:rtype: :py:class:`i2c_msg` | [
"Prepares",
"an",
"i2c",
"read",
"transaction",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L158-L172 | train | 204,716 |
kplindegaard/smbus2 | smbus2/smbus2.py | i2c_msg.write | def write(address, buf):
"""
Prepares an i2c write transaction.
:param address: Slave address.
:type address: int
:param buf: Bytes to write. Either list of values or str.
:type buf: list
:return: New :py:class:`i2c_msg` instance for write operation.
:rtype: :py:class:`i2c_msg`
"""
if sys.version_info.major >= 3:
if type(buf) is str:
buf = bytes(map(ord, buf))
else:
buf = bytes(buf)
else:
if type(buf) is not str:
buf = ''.join([chr(x) for x in buf])
arr = create_string_buffer(buf, len(buf))
return i2c_msg(
addr=address, flags=0, len=len(arr),
buf=arr) | python | def write(address, buf):
"""
Prepares an i2c write transaction.
:param address: Slave address.
:type address: int
:param buf: Bytes to write. Either list of values or str.
:type buf: list
:return: New :py:class:`i2c_msg` instance for write operation.
:rtype: :py:class:`i2c_msg`
"""
if sys.version_info.major >= 3:
if type(buf) is str:
buf = bytes(map(ord, buf))
else:
buf = bytes(buf)
else:
if type(buf) is not str:
buf = ''.join([chr(x) for x in buf])
arr = create_string_buffer(buf, len(buf))
return i2c_msg(
addr=address, flags=0, len=len(arr),
buf=arr) | [
"def",
"write",
"(",
"address",
",",
"buf",
")",
":",
"if",
"sys",
".",
"version_info",
".",
"major",
">=",
"3",
":",
"if",
"type",
"(",
"buf",
")",
"is",
"str",
":",
"buf",
"=",
"bytes",
"(",
"map",
"(",
"ord",
",",
"buf",
")",
")",
"else",
... | Prepares an i2c write transaction.
:param address: Slave address.
:type address: int
:param buf: Bytes to write. Either list of values or str.
:type buf: list
:return: New :py:class:`i2c_msg` instance for write operation.
:rtype: :py:class:`i2c_msg` | [
"Prepares",
"an",
"i2c",
"write",
"transaction",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L175-L197 | train | 204,717 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.open | def open(self, bus):
"""
Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int
"""
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs() | python | def open(self, bus):
"""
Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int
"""
self.fd = os.open("/dev/i2c-{}".format(bus), os.O_RDWR)
self.funcs = self._get_funcs() | [
"def",
"open",
"(",
"self",
",",
"bus",
")",
":",
"self",
".",
"fd",
"=",
"os",
".",
"open",
"(",
"\"/dev/i2c-{}\"",
".",
"format",
"(",
"bus",
")",
",",
"os",
".",
"O_RDWR",
")",
"self",
".",
"funcs",
"=",
"self",
".",
"_get_funcs",
"(",
")"
] | Open a given i2c bus.
:param bus: i2c bus number (e.g. 0 or 1)
:type bus: int | [
"Open",
"a",
"given",
"i2c",
"bus",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L274-L282 | train | 204,718 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.close | def close(self):
"""
Close the i2c connection.
"""
if self.fd:
os.close(self.fd)
self.fd = None | python | def close(self):
"""
Close the i2c connection.
"""
if self.fd:
os.close(self.fd)
self.fd = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"fd",
":",
"os",
".",
"close",
"(",
"self",
".",
"fd",
")",
"self",
".",
"fd",
"=",
"None"
] | Close the i2c connection. | [
"Close",
"the",
"i2c",
"connection",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L284-L290 | train | 204,719 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus._set_address | def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force | python | def _set_address(self, address, force=None):
"""
Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean
"""
force = force if force is not None else self.force
if self.address != address or self._force_last != force:
if force is True:
ioctl(self.fd, I2C_SLAVE_FORCE, address)
else:
ioctl(self.fd, I2C_SLAVE, address)
self.address = address
self._force_last = force | [
"def",
"_set_address",
"(",
"self",
",",
"address",
",",
"force",
"=",
"None",
")",
":",
"force",
"=",
"force",
"if",
"force",
"is",
"not",
"None",
"else",
"self",
".",
"force",
"if",
"self",
".",
"address",
"!=",
"address",
"or",
"self",
".",
"_forc... | Set i2c slave address to use for subsequent calls.
:param address:
:type address: int
:param force:
:type force: Boolean | [
"Set",
"i2c",
"slave",
"address",
"to",
"use",
"for",
"subsequent",
"calls",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L292-L308 | train | 204,720 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus._get_funcs | def _get_funcs(self):
"""
Returns a 32-bit value stating supported I2C functions.
:rtype: int
"""
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value | python | def _get_funcs(self):
"""
Returns a 32-bit value stating supported I2C functions.
:rtype: int
"""
f = c_uint32()
ioctl(self.fd, I2C_FUNCS, f)
return f.value | [
"def",
"_get_funcs",
"(",
"self",
")",
":",
"f",
"=",
"c_uint32",
"(",
")",
"ioctl",
"(",
"self",
".",
"fd",
",",
"I2C_FUNCS",
",",
"f",
")",
"return",
"f",
".",
"value"
] | Returns a 32-bit value stating supported I2C functions.
:rtype: int | [
"Returns",
"a",
"32",
"-",
"bit",
"value",
"stating",
"supported",
"I2C",
"functions",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L310-L318 | train | 204,721 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.read_byte | def read_byte(self, i2c_addr, force=None):
"""
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte | python | def read_byte(self, i2c_addr, force=None):
"""
Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=0, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte | [
"def",
"read_byte",
"(",
"self",
",",
"i2c_addr",
",",
"force",
"=",
"None",
")",
":",
"self",
".",
"_set_address",
"(",
"i2c_addr",
",",
"force",
"=",
"force",
")",
"msg",
"=",
"i2c_smbus_ioctl_data",
".",
"create",
"(",
"read_write",
"=",
"I2C_SMBUS_READ... | Read a single byte from a device.
:rtype: int
:param i2c_addr: i2c address
:type i2c_addr: int
:param force:
:type force: Boolean
:return: Read byte value | [
"Read",
"a",
"single",
"byte",
"from",
"a",
"device",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L333-L349 | train | 204,722 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.write_byte | def write_byte(self, i2c_addr, value, force=None):
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg) | python | def write_byte(self, i2c_addr, value, force=None):
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=value, size=I2C_SMBUS_BYTE
)
ioctl(self.fd, I2C_SMBUS, msg) | [
"def",
"write_byte",
"(",
"self",
",",
"i2c_addr",
",",
"value",
",",
"force",
"=",
"None",
")",
":",
"self",
".",
"_set_address",
"(",
"i2c_addr",
",",
"force",
"=",
"force",
")",
"msg",
"=",
"i2c_smbus_ioctl_data",
".",
"create",
"(",
"read_write",
"="... | Write a single byte to a device.
:param i2c_addr: i2c address
:type i2c_addr: int
:param value: value to write
:type value: int
:param force:
:type force: Boolean | [
"Write",
"a",
"single",
"byte",
"to",
"a",
"device",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L351-L366 | train | 204,723 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.read_byte_data | def read_byte_data(self, i2c_addr, register, force=None):
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte | python | def read_byte_data(self, i2c_addr, register, force=None):
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BYTE_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.byte | [
"def",
"read_byte_data",
"(",
"self",
",",
"i2c_addr",
",",
"register",
",",
"force",
"=",
"None",
")",
":",
"self",
".",
"_set_address",
"(",
"i2c_addr",
",",
"force",
"=",
"force",
")",
"msg",
"=",
"i2c_smbus_ioctl_data",
".",
"create",
"(",
"read_write"... | Read a single byte from a designated register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: Read byte value
:rtype: int | [
"Read",
"a",
"single",
"byte",
"from",
"a",
"designated",
"register",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L368-L386 | train | 204,724 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.write_byte_data | def write_byte_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg) | python | def write_byte_data(self, i2c_addr, register, value, force=None):
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BYTE_DATA
)
msg.data.contents.byte = value
ioctl(self.fd, I2C_SMBUS, msg) | [
"def",
"write_byte_data",
"(",
"self",
",",
"i2c_addr",
",",
"register",
",",
"value",
",",
"force",
"=",
"None",
")",
":",
"self",
".",
"_set_address",
"(",
"i2c_addr",
",",
"force",
"=",
"force",
")",
"msg",
"=",
"i2c_smbus_ioctl_data",
".",
"create",
... | Write a byte to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to write to
:type register: int
:param value: Byte value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: None | [
"Write",
"a",
"byte",
"to",
"a",
"given",
"register",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L388-L407 | train | 204,725 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.process_call | def process_call(self, i2c_addr, register, value, force=None):
"""
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word | python | def process_call(self, i2c_addr, register, value, force=None):
"""
Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_PROC_CALL
)
msg.data.contents.word = value
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word | [
"def",
"process_call",
"(",
"self",
",",
"i2c_addr",
",",
"register",
",",
"value",
",",
"force",
"=",
"None",
")",
":",
"self",
".",
"_set_address",
"(",
"i2c_addr",
",",
"force",
"=",
"force",
")",
"msg",
"=",
"i2c_smbus_ioctl_data",
".",
"create",
"("... | Executes a SMBus Process Call, sending a 16-bit value and receiving a 16-bit response
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int | [
"Executes",
"a",
"SMBus",
"Process",
"Call",
"sending",
"a",
"16",
"-",
"bit",
"value",
"and",
"receiving",
"a",
"16",
"-",
"bit",
"response"
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L450-L470 | train | 204,726 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.read_block_data | def read_block_data(self, i2c_addr, register, force=None):
"""
Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1] | python | def read_block_data(self, i2c_addr, register, force=None):
"""
Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_BLOCK_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
length = msg.data.contents.block[0]
return msg.data.contents.block[1:length + 1] | [
"def",
"read_block_data",
"(",
"self",
",",
"i2c_addr",
",",
"register",
",",
"force",
"=",
"None",
")",
":",
"self",
".",
"_set_address",
"(",
"i2c_addr",
",",
"force",
"=",
"force",
")",
"msg",
"=",
"i2c_smbus_ioctl_data",
".",
"create",
"(",
"read_write... | Read a block of up to 32-bytes from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list | [
"Read",
"a",
"block",
"of",
"up",
"to",
"32",
"-",
"bytes",
"from",
"a",
"given",
"register",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L472-L491 | train | 204,727 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.write_block_data | def write_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg) | python | def write_block_data(self, i2c_addr, register, data, force=None):
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None
"""
length = len(data)
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Data length cannot exceed %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_WRITE, command=register, size=I2C_SMBUS_BLOCK_DATA
)
msg.data.contents.block[0] = length
msg.data.contents.block[1:length + 1] = data
ioctl(self.fd, I2C_SMBUS, msg) | [
"def",
"write_block_data",
"(",
"self",
",",
"i2c_addr",
",",
"register",
",",
"data",
",",
"force",
"=",
"None",
")",
":",
"length",
"=",
"len",
"(",
"data",
")",
"if",
"length",
">",
"I2C_SMBUS_BLOCK_MAX",
":",
"raise",
"ValueError",
"(",
"\"Data length ... | Write a block of byte data to a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param data: List of bytes
:type data: list
:param force:
:type force: Boolean
:rtype: None | [
"Write",
"a",
"block",
"of",
"byte",
"data",
"to",
"a",
"given",
"register",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L493-L516 | train | 204,728 |
kplindegaard/smbus2 | smbus2/smbus2.py | SMBus.read_i2c_block_data | def read_i2c_block_data(self, i2c_addr, register, length, force=None):
"""
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1] | python | def read_i2c_block_data(self, i2c_addr, register, length, force=None):
"""
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list
"""
if length > I2C_SMBUS_BLOCK_MAX:
raise ValueError("Desired block length over %d bytes" % I2C_SMBUS_BLOCK_MAX)
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_I2C_BLOCK_DATA
)
msg.data.contents.byte = length
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.block[1:length + 1] | [
"def",
"read_i2c_block_data",
"(",
"self",
",",
"i2c_addr",
",",
"register",
",",
"length",
",",
"force",
"=",
"None",
")",
":",
"if",
"length",
">",
"I2C_SMBUS_BLOCK_MAX",
":",
"raise",
"ValueError",
"(",
"\"Desired block length over %d bytes\"",
"%",
"I2C_SMBUS_... | Read a block of byte data from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Start register
:type register: int
:param length: Desired block length
:type length: int
:param force:
:type force: Boolean
:return: List of bytes
:rtype: list | [
"Read",
"a",
"block",
"of",
"byte",
"data",
"from",
"a",
"given",
"register",
"."
] | a1088a03438dba84c266b73ad61b0c06750d0961 | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L546-L569 | train | 204,729 |
anthill/koala | koala/utils.py | max_dimension | def max_dimension(cellmap, sheet = None):
"""
This function calculates the maximum dimension of the workbook or optionally the worksheet. It returns a tupple
of two integers, the first being the rows and the second being the columns.
:param cellmap: all the cells that should be used to calculate the maximum.
:param sheet: (optionally) a string with the sheet name.
:return: a tupple of two integers, the first being the rows and the second being the columns.
"""
cells = list(cellmap.values())
rows = 0
cols = 0
for cell in cells:
if sheet is None or cell.sheet == sheet:
rows = max(rows, int(cell.row))
cols = max(cols, int(col2num(cell.col)))
return (rows, cols) | python | def max_dimension(cellmap, sheet = None):
"""
This function calculates the maximum dimension of the workbook or optionally the worksheet. It returns a tupple
of two integers, the first being the rows and the second being the columns.
:param cellmap: all the cells that should be used to calculate the maximum.
:param sheet: (optionally) a string with the sheet name.
:return: a tupple of two integers, the first being the rows and the second being the columns.
"""
cells = list(cellmap.values())
rows = 0
cols = 0
for cell in cells:
if sheet is None or cell.sheet == sheet:
rows = max(rows, int(cell.row))
cols = max(cols, int(col2num(cell.col)))
return (rows, cols) | [
"def",
"max_dimension",
"(",
"cellmap",
",",
"sheet",
"=",
"None",
")",
":",
"cells",
"=",
"list",
"(",
"cellmap",
".",
"values",
"(",
")",
")",
"rows",
"=",
"0",
"cols",
"=",
"0",
"for",
"cell",
"in",
"cells",
":",
"if",
"sheet",
"is",
"None",
"... | This function calculates the maximum dimension of the workbook or optionally the worksheet. It returns a tupple
of two integers, the first being the rows and the second being the columns.
:param cellmap: all the cells that should be used to calculate the maximum.
:param sheet: (optionally) a string with the sheet name.
:return: a tupple of two integers, the first being the rows and the second being the columns. | [
"This",
"function",
"calculates",
"the",
"maximum",
"dimension",
"of",
"the",
"workbook",
"or",
"optionally",
"the",
"worksheet",
".",
"It",
"returns",
"a",
"tupple",
"of",
"two",
"integers",
"the",
"first",
"being",
"the",
"rows",
"and",
"the",
"second",
"b... | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/utils.py#L103-L121 | train | 204,730 |
anthill/koala | koala/reader.py | _cast_number | def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value) | python | def _cast_number(value): # source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default
"Convert numbers as string to an int or float"
m = FLOAT_REGEX.search(value)
if m is not None:
return float(value)
return int(value) | [
"def",
"_cast_number",
"(",
"value",
")",
":",
"# source: https://bitbucket.org/openpyxl/openpyxl/src/93604327bce7aac5e8270674579af76d390e09c0/openpyxl/cell/read_only.py?at=default&fileviewer=file-view-default",
"m",
"=",
"FLOAT_REGEX",
".",
"search",
"(",
"value",
")",
"if",
"m",
... | Convert numbers as string to an int or float | [
"Convert",
"numbers",
"as",
"string",
"to",
"an",
"int",
"or",
"float"
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L74-L79 | train | 204,731 |
anthill/koala | koala/reader.py | read_rels | def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ} | python | def read_rels(archive):
"""Read relationships for a workbook"""
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}Relationship' % PKG_REL_NS):
rId = element.get('Id')
pth = element.get("Target")
typ = element.get('Type')
# normalise path
if pth.startswith("/xl"):
pth = pth.replace("/xl", "xl")
elif not pth.startswith("xl") and not pth.startswith(".."):
pth = "xl/" + pth
yield rId, {'path':pth, 'type':typ} | [
"def",
"read_rels",
"(",
"archive",
")",
":",
"xml_source",
"=",
"archive",
".",
"read",
"(",
"ARC_WORKBOOK_RELS",
")",
"tree",
"=",
"fromstring",
"(",
"xml_source",
")",
"for",
"element",
"in",
"safe_iterator",
"(",
"tree",
",",
"'{%s}Relationship'",
"%",
"... | Read relationships for a workbook | [
"Read",
"relationships",
"for",
"a",
"workbook"
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L231-L244 | train | 204,732 |
anthill/koala | koala/reader.py | read_content_types | def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName') | python | def read_content_types(archive):
"""Read content types."""
xml_source = archive.read(ARC_CONTENT_TYPES)
root = fromstring(xml_source)
contents_root = root.findall('{%s}Override' % CONTYPES_NS)
for type in contents_root:
yield type.get('ContentType'), type.get('PartName') | [
"def",
"read_content_types",
"(",
"archive",
")",
":",
"xml_source",
"=",
"archive",
".",
"read",
"(",
"ARC_CONTENT_TYPES",
")",
"root",
"=",
"fromstring",
"(",
"xml_source",
")",
"contents_root",
"=",
"root",
".",
"findall",
"(",
"'{%s}Override'",
"%",
"CONTY... | Read content types. | [
"Read",
"content",
"types",
"."
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L246-L252 | train | 204,733 |
anthill/koala | koala/reader.py | read_sheets | def read_sheets(archive):
"""Read worksheet titles and ids for a workbook"""
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib | python | def read_sheets(archive):
"""Read worksheet titles and ids for a workbook"""
xml_source = archive.read(ARC_WORKBOOK)
tree = fromstring(xml_source)
for element in safe_iterator(tree, '{%s}sheet' % SHEET_MAIN_NS):
attrib = element.attrib
attrib['id'] = attrib["{%s}id" % REL_NS]
del attrib["{%s}id" % REL_NS]
if attrib['id']:
yield attrib | [
"def",
"read_sheets",
"(",
"archive",
")",
":",
"xml_source",
"=",
"archive",
".",
"read",
"(",
"ARC_WORKBOOK",
")",
"tree",
"=",
"fromstring",
"(",
"xml_source",
")",
"for",
"element",
"in",
"safe_iterator",
"(",
"tree",
",",
"'{%s}sheet'",
"%",
"SHEET_MAIN... | Read worksheet titles and ids for a workbook | [
"Read",
"worksheet",
"titles",
"and",
"ids",
"for",
"a",
"workbook"
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L256-L265 | train | 204,734 |
anthill/koala | koala/reader.py | detect_worksheets | def detect_worksheets(archive):
"""Return a list of worksheets"""
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel | python | def detect_worksheets(archive):
"""Return a list of worksheets"""
# content types has a list of paths but no titles
# workbook has a list of titles and relIds but no paths
# workbook_rels has a list of relIds and paths but no titles
# rels = {'id':{'title':'', 'path':''} }
content_types = read_content_types(archive)
valid_sheets = dict((path, ct) for ct, path in content_types if ct == WORKSHEET_TYPE)
rels = dict(read_rels(archive))
for sheet in read_sheets(archive):
rel = rels[sheet['id']]
rel['title'] = sheet['name']
rel['sheet_id'] = sheet['sheetId']
rel['state'] = sheet.get('state', 'visible')
if ("/" + rel['path'] in valid_sheets
or "worksheets" in rel['path']): # fallback in case content type is missing
yield rel | [
"def",
"detect_worksheets",
"(",
"archive",
")",
":",
"# content types has a list of paths but no titles",
"# workbook has a list of titles and relIds but no paths",
"# workbook_rels has a list of relIds and paths but no titles",
"# rels = {'id':{'title':'', 'path':''} }",
"content_types",
"="... | Return a list of worksheets | [
"Return",
"a",
"list",
"of",
"worksheets"
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L267-L283 | train | 204,735 |
anthill/koala | koala/reader.py | read_string_table | def read_string_table(xml_source):
"""Read in all shared strings in the table"""
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings) | python | def read_string_table(xml_source):
"""Read in all shared strings in the table"""
strings = []
src = _get_xml_iter(xml_source)
for _, node in iterparse(src):
if node.tag == '{%s}si' % SHEET_MAIN_NS:
text = Text.from_tree(node).content
text = text.replace('x005F_', '')
strings.append(text)
node.clear()
return IndexedList(strings) | [
"def",
"read_string_table",
"(",
"xml_source",
")",
":",
"strings",
"=",
"[",
"]",
"src",
"=",
"_get_xml_iter",
"(",
"xml_source",
")",
"for",
"_",
",",
"node",
"in",
"iterparse",
"(",
"src",
")",
":",
"if",
"node",
".",
"tag",
"==",
"'{%s}si'",
"%",
... | Read in all shared strings in the table | [
"Read",
"in",
"all",
"shared",
"strings",
"in",
"the",
"table"
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/reader.py#L285-L299 | train | 204,736 |
anthill/koala | koala/ast/__init__.py | create_node | def create_node(t, ref = None, debug = False):
"""Simple factory function"""
if t.ttype == "operand":
if t.tsubtype in ["range", "named_range", "pointer"] :
# print 'Creating Node', t.tvalue, t.tsubtype
return RangeNode(t, ref, debug = debug)
else:
return OperandNode(t)
elif t.ttype == "function":
return FunctionNode(t, ref, debug = debug)
elif t.ttype.startswith("operator"):
return OperatorNode(t, ref, debug = debug)
else:
return ASTNode(t, debug = debug) | python | def create_node(t, ref = None, debug = False):
"""Simple factory function"""
if t.ttype == "operand":
if t.tsubtype in ["range", "named_range", "pointer"] :
# print 'Creating Node', t.tvalue, t.tsubtype
return RangeNode(t, ref, debug = debug)
else:
return OperandNode(t)
elif t.ttype == "function":
return FunctionNode(t, ref, debug = debug)
elif t.ttype.startswith("operator"):
return OperatorNode(t, ref, debug = debug)
else:
return ASTNode(t, debug = debug) | [
"def",
"create_node",
"(",
"t",
",",
"ref",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"if",
"t",
".",
"ttype",
"==",
"\"operand\"",
":",
"if",
"t",
".",
"tsubtype",
"in",
"[",
"\"range\"",
",",
"\"named_range\"",
",",
"\"pointer\"",
"]",
":"... | Simple factory function | [
"Simple",
"factory",
"function"
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L18-L31 | train | 204,737 |
anthill/koala | koala/ast/__init__.py | build_ast | def build_ast(expression, debug = False):
"""build an AST from an Excel formula expression in reverse polish notation"""
#use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
# Hack to write the name of sheet in 2argument address
if(n.tvalue == ':'):
if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
#try:
# args = [stack.pop() for _ in range(n.num_args)]
#except:
# print 'STACK', stack, type(n)
# raise Exception('prut')
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop() | python | def build_ast(expression, debug = False):
"""build an AST from an Excel formula expression in reverse polish notation"""
#use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n,OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
# Hack to write the name of sheet in 2argument address
if(n.tvalue == ':'):
if '!' in arg1.tvalue and arg2.ttype == 'operand' and '!' not in arg2.tvalue:
arg2.tvalue = arg1.tvalue.split('!')[0] + '!' + arg2.tvalue
G.add_node(arg1,pos = 1)
G.add_node(arg2,pos = 2)
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1,pos = 1)
G.add_edge(arg1, n)
elif isinstance(n,FunctionNode):
args = []
for _ in range(n.num_args):
try:
args.append(stack.pop())
except:
raise Exception()
#try:
# args = [stack.pop() for _ in range(n.num_args)]
#except:
# print 'STACK', stack, type(n)
# raise Exception('prut')
args.reverse()
for i,a in enumerate(args):
G.add_node(a,pos = i)
G.add_edge(a,n)
else:
G.add_node(n,pos=0)
stack.append(n)
return G,stack.pop() | [
"def",
"build_ast",
"(",
"expression",
",",
"debug",
"=",
"False",
")",
":",
"#use a directed graph to store the tree",
"G",
"=",
"DiGraph",
"(",
")",
"stack",
"=",
"[",
"]",
"for",
"n",
"in",
"expression",
":",
"# Since the graph does not maintain the order of addi... | build an AST from an Excel formula expression in reverse polish notation | [
"build",
"an",
"AST",
"from",
"an",
"Excel",
"formula",
"expression",
"in",
"reverse",
"polish",
"notation"
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L271-L320 | train | 204,738 |
anthill/koala | koala/ast/__init__.py | cell2code | def cell2code(cell, named_ranges):
"""Generate python code for the given cell"""
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context=sheet)
# print 'CODE', code, ref
else:
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
elif isinstance(cell.value, str):
raise RuntimeError("Got unexpected non-unicode str")
else:
code = str(cell.value)
return code,ast | python | def cell2code(cell, named_ranges):
"""Generate python code for the given cell"""
if cell.formula:
debug = False
# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:
# debug = True
# if debug:
# print 'FORMULA', cell.formula
ref = parse_cell_address(cell.address()) if not cell.is_named_range else None
sheet = cell.sheet
e = shunting_yard(cell.formula, named_ranges, ref=ref, tokenize_range = False)
ast,root = build_ast(e, debug = debug)
code = root.emit(ast, context=sheet)
# print 'CODE', code, ref
else:
ast = None
if isinstance(cell.value, unicode):
code = u'u"' + cell.value.replace(u'"', u'\\"') + u'"'
elif isinstance(cell.value, str):
raise RuntimeError("Got unexpected non-unicode str")
else:
code = str(cell.value)
return code,ast | [
"def",
"cell2code",
"(",
"cell",
",",
"named_ranges",
")",
":",
"if",
"cell",
".",
"formula",
":",
"debug",
"=",
"False",
"# if 'OFFSET' in cell.formula or 'INDEX' in cell.formula:",
"# debug = True",
"# if debug:",
"# print 'FORMULA', cell.formula",
"ref",
"=",
"... | Generate python code for the given cell | [
"Generate",
"python",
"code",
"for",
"the",
"given",
"cell"
] | 393089fe081380506e73235db18a32b4e078d222 | https://github.com/anthill/koala/blob/393089fe081380506e73235db18a32b4e078d222/koala/ast/__init__.py#L358-L386 | train | 204,739 |
GaretJax/sphinx-autobuild | sphinx_autobuild/__init__.py | LivereloadWatchdogWatcher.examine | def examine(self):
"""
Called by LiveReloadHandler's poll_tasks method.
If a boolean true value is returned, then the waiters (browsers) are
reloaded.
"""
if self._changes:
return self._changes.pop()
action_file = None
if self._changed:
self._changed = False
action_file = self._action_file or True # TODO: Hack (see above)
return action_file, None | python | def examine(self):
"""
Called by LiveReloadHandler's poll_tasks method.
If a boolean true value is returned, then the waiters (browsers) are
reloaded.
"""
if self._changes:
return self._changes.pop()
action_file = None
if self._changed:
self._changed = False
action_file = self._action_file or True # TODO: Hack (see above)
return action_file, None | [
"def",
"examine",
"(",
"self",
")",
":",
"if",
"self",
".",
"_changes",
":",
"return",
"self",
".",
"_changes",
".",
"pop",
"(",
")",
"action_file",
"=",
"None",
"if",
"self",
".",
"_changed",
":",
"self",
".",
"_changed",
"=",
"False",
"action_file",
... | Called by LiveReloadHandler's poll_tasks method.
If a boolean true value is returned, then the waiters (browsers) are
reloaded. | [
"Called",
"by",
"LiveReloadHandler",
"s",
"poll_tasks",
"method",
"."
] | e0f40b6359ca00a48ca5ba903de1e25da27de453 | https://github.com/GaretJax/sphinx-autobuild/blob/e0f40b6359ca00a48ca5ba903de1e25da27de453/sphinx_autobuild/__init__.py#L93-L107 | train | 204,740 |
GaretJax/sphinx-autobuild | sphinx_autobuild/__init__.py | LivereloadWatchdogWatcher.watch | def watch(self, path, action, *args, **kwargs):
"""
Called by the Server instance when a new watch task is requested.
"""
if action is None:
action = _set_changed
event_handler = _WatchdogHandler(self, action)
self._observer.schedule(event_handler, path=path, recursive=True) | python | def watch(self, path, action, *args, **kwargs):
"""
Called by the Server instance when a new watch task is requested.
"""
if action is None:
action = _set_changed
event_handler = _WatchdogHandler(self, action)
self._observer.schedule(event_handler, path=path, recursive=True) | [
"def",
"watch",
"(",
"self",
",",
"path",
",",
"action",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"action",
"is",
"None",
":",
"action",
"=",
"_set_changed",
"event_handler",
"=",
"_WatchdogHandler",
"(",
"self",
",",
"action",
")",
... | Called by the Server instance when a new watch task is requested. | [
"Called",
"by",
"the",
"Server",
"instance",
"when",
"a",
"new",
"watch",
"task",
"is",
"requested",
"."
] | e0f40b6359ca00a48ca5ba903de1e25da27de453 | https://github.com/GaretJax/sphinx-autobuild/blob/e0f40b6359ca00a48ca5ba903de1e25da27de453/sphinx_autobuild/__init__.py#L109-L116 | train | 204,741 |
GaretJax/sphinx-autobuild | fabtasks/packaging.py | authors | def authors():
"""
Updates the AUTHORS file with a list of committers from GIT.
"""
fmt_re = re.compile(r'([^<]+) <([^>]+)>')
authors = local('git shortlog -s -e -n | cut -f 2-', capture=True)
with open('AUTHORS', 'w') as fh:
fh.write('Project contributors\n')
fh.write('====================\n\n')
for line in authors.splitlines():
match = fmt_re.match(line)
name, email = match.groups()
if email in env.ignored_authors:
continue
fh.write(' * ')
fh.write(line)
fh.write('\n') | python | def authors():
"""
Updates the AUTHORS file with a list of committers from GIT.
"""
fmt_re = re.compile(r'([^<]+) <([^>]+)>')
authors = local('git shortlog -s -e -n | cut -f 2-', capture=True)
with open('AUTHORS', 'w') as fh:
fh.write('Project contributors\n')
fh.write('====================\n\n')
for line in authors.splitlines():
match = fmt_re.match(line)
name, email = match.groups()
if email in env.ignored_authors:
continue
fh.write(' * ')
fh.write(line)
fh.write('\n') | [
"def",
"authors",
"(",
")",
":",
"fmt_re",
"=",
"re",
".",
"compile",
"(",
"r'([^<]+) <([^>]+)>'",
")",
"authors",
"=",
"local",
"(",
"'git shortlog -s -e -n | cut -f 2-'",
",",
"capture",
"=",
"True",
")",
"with",
"open",
"(",
"'AUTHORS'",
",",
"'w'",
")",
... | Updates the AUTHORS file with a list of committers from GIT. | [
"Updates",
"the",
"AUTHORS",
"file",
"with",
"a",
"list",
"of",
"committers",
"from",
"GIT",
"."
] | e0f40b6359ca00a48ca5ba903de1e25da27de453 | https://github.com/GaretJax/sphinx-autobuild/blob/e0f40b6359ca00a48ca5ba903de1e25da27de453/fabtasks/packaging.py#L36-L52 | train | 204,742 |
GaretJax/sphinx-autobuild | fabtasks/packaging.py | release | def release():
"""
Create a new release and upload it to PyPI.
"""
if not is_working_tree_clean():
print('Your working tree is not clean. Refusing to create a release.')
return
print('Rebuilding the AUTHORS file to check for modifications...')
authors()
if not is_working_tree_clean():
print('Your working tree is not clean after the AUTHORS file was '
'rebuilt.')
print('Please commit the changes before continuing.')
return
if not is_manifest_up_to_date():
print('Manifest is not up to date.')
print('Please update MANIFEST.in or remove spurious files.')
return
# Get version
version = 'v{}'.format(local('python setup.py --version', capture=True))
name = local('python setup.py --name', capture=True)
# Tag
tag_message = '{} release version {}.'.format(name, version)
print('----------------------')
print('Proceeding will tag the release, push the repository upstream,')
print('and release a new version on PyPI.')
print()
print('Version: {}'.format(version))
print('Tag message: {}'.format(tag_message))
print()
if not confirm('Continue?', default=True):
print('Aborting.')
return
local('git tag -a {} -m {}'.format(pipes.quote(version),
pipes.quote(tag_message)))
# Push
local('git push --tags origin develop')
# Package and upload to pypi
local('python setup.py sdist bdist_wheel upload') | python | def release():
"""
Create a new release and upload it to PyPI.
"""
if not is_working_tree_clean():
print('Your working tree is not clean. Refusing to create a release.')
return
print('Rebuilding the AUTHORS file to check for modifications...')
authors()
if not is_working_tree_clean():
print('Your working tree is not clean after the AUTHORS file was '
'rebuilt.')
print('Please commit the changes before continuing.')
return
if not is_manifest_up_to_date():
print('Manifest is not up to date.')
print('Please update MANIFEST.in or remove spurious files.')
return
# Get version
version = 'v{}'.format(local('python setup.py --version', capture=True))
name = local('python setup.py --name', capture=True)
# Tag
tag_message = '{} release version {}.'.format(name, version)
print('----------------------')
print('Proceeding will tag the release, push the repository upstream,')
print('and release a new version on PyPI.')
print()
print('Version: {}'.format(version))
print('Tag message: {}'.format(tag_message))
print()
if not confirm('Continue?', default=True):
print('Aborting.')
return
local('git tag -a {} -m {}'.format(pipes.quote(version),
pipes.quote(tag_message)))
# Push
local('git push --tags origin develop')
# Package and upload to pypi
local('python setup.py sdist bdist_wheel upload') | [
"def",
"release",
"(",
")",
":",
"if",
"not",
"is_working_tree_clean",
"(",
")",
":",
"print",
"(",
"'Your working tree is not clean. Refusing to create a release.'",
")",
"return",
"print",
"(",
"'Rebuilding the AUTHORS file to check for modifications...'",
")",
"authors",
... | Create a new release and upload it to PyPI. | [
"Create",
"a",
"new",
"release",
"and",
"upload",
"it",
"to",
"PyPI",
"."
] | e0f40b6359ca00a48ca5ba903de1e25da27de453 | https://github.com/GaretJax/sphinx-autobuild/blob/e0f40b6359ca00a48ca5ba903de1e25da27de453/fabtasks/packaging.py#L56-L104 | train | 204,743 |
MycroftAI/adapt | adapt/tools/text/trie.py | TrieNode.insert | def insert(self, iterable, index=0, data=None, weight=1.0):
"""Insert new node into tree
Args:
iterable(hashable): key used to find in the future.
data(object): data associated with the key
index(int): an index used for insertion.
weight(float): the wait given for the item added.
"""
if index == len(iterable):
self.is_terminal = True
self.key = iterable
self.weight = weight
if data:
self.data.add(data)
else:
if iterable[index] not in self.children:
self.children[iterable[index]] = TrieNode()
self.children[iterable[index]].insert(iterable, index + 1, data) | python | def insert(self, iterable, index=0, data=None, weight=1.0):
"""Insert new node into tree
Args:
iterable(hashable): key used to find in the future.
data(object): data associated with the key
index(int): an index used for insertion.
weight(float): the wait given for the item added.
"""
if index == len(iterable):
self.is_terminal = True
self.key = iterable
self.weight = weight
if data:
self.data.add(data)
else:
if iterable[index] not in self.children:
self.children[iterable[index]] = TrieNode()
self.children[iterable[index]].insert(iterable, index + 1, data) | [
"def",
"insert",
"(",
"self",
",",
"iterable",
",",
"index",
"=",
"0",
",",
"data",
"=",
"None",
",",
"weight",
"=",
"1.0",
")",
":",
"if",
"index",
"==",
"len",
"(",
"iterable",
")",
":",
"self",
".",
"is_terminal",
"=",
"True",
"self",
".",
"ke... | Insert new node into tree
Args:
iterable(hashable): key used to find in the future.
data(object): data associated with the key
index(int): an index used for insertion.
weight(float): the wait given for the item added. | [
"Insert",
"new",
"node",
"into",
"tree"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/tools/text/trie.py#L86-L104 | train | 204,744 |
MycroftAI/adapt | adapt/tools/text/trie.py | TrieNode.remove | def remove(self, iterable, data=None, index=0):
"""Remove an element from the trie
Args
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to me removed
Returns:
bool:
True: if it was removed
False: if it was not removed
"""
if index == len(iterable):
if self.is_terminal:
if data:
self.data.remove(data)
if len(self.data) == 0:
self.is_terminal = False
else:
self.data.clear()
self.is_terminal = False
return True
else:
return False
elif iterable[index] in self.children:
return self.children[iterable[index]].remove(iterable, index=index+1, data=data)
else:
return False | python | def remove(self, iterable, data=None, index=0):
"""Remove an element from the trie
Args
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to me removed
Returns:
bool:
True: if it was removed
False: if it was not removed
"""
if index == len(iterable):
if self.is_terminal:
if data:
self.data.remove(data)
if len(self.data) == 0:
self.is_terminal = False
else:
self.data.clear()
self.is_terminal = False
return True
else:
return False
elif iterable[index] in self.children:
return self.children[iterable[index]].remove(iterable, index=index+1, data=data)
else:
return False | [
"def",
"remove",
"(",
"self",
",",
"iterable",
",",
"data",
"=",
"None",
",",
"index",
"=",
"0",
")",
":",
"if",
"index",
"==",
"len",
"(",
"iterable",
")",
":",
"if",
"self",
".",
"is_terminal",
":",
"if",
"data",
":",
"self",
".",
"data",
".",
... | Remove an element from the trie
Args
iterable(hashable): key used to find what is to be removed
data(object): data associated with the key
index(int): index of what is to me removed
Returns:
bool:
True: if it was removed
False: if it was not removed | [
"Remove",
"an",
"element",
"from",
"the",
"trie"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/tools/text/trie.py#L112-L140 | train | 204,745 |
MycroftAI/adapt | adapt/tools/text/trie.py | Trie.gather | def gather(self, iterable):
"""Calls the lookup with gather True Passing iterable and yields
the result.
"""
for result in self.lookup(iterable, gather=True):
yield result | python | def gather(self, iterable):
"""Calls the lookup with gather True Passing iterable and yields
the result.
"""
for result in self.lookup(iterable, gather=True):
yield result | [
"def",
"gather",
"(",
"self",
",",
"iterable",
")",
":",
"for",
"result",
"in",
"self",
".",
"lookup",
"(",
"iterable",
",",
"gather",
"=",
"True",
")",
":",
"yield",
"result"
] | Calls the lookup with gather True Passing iterable and yields
the result. | [
"Calls",
"the",
"lookup",
"with",
"gather",
"True",
"Passing",
"iterable",
"and",
"yields",
"the",
"result",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/tools/text/trie.py#L170-L175 | train | 204,746 |
MycroftAI/adapt | adapt/tools/text/trie.py | Trie.lookup | def lookup(self, iterable, gather=False):
"""Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrive nodes from tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init
"""
for result in self.root.lookup(iterable,
gather=gather,
edit_distance=0,
max_edit_distance=self.max_edit_distance,
match_threshold=self.match_threshold):
yield result | python | def lookup(self, iterable, gather=False):
"""Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrive nodes from tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init
"""
for result in self.root.lookup(iterable,
gather=gather,
edit_distance=0,
max_edit_distance=self.max_edit_distance,
match_threshold=self.match_threshold):
yield result | [
"def",
"lookup",
"(",
"self",
",",
"iterable",
",",
"gather",
"=",
"False",
")",
":",
"for",
"result",
"in",
"self",
".",
"root",
".",
"lookup",
"(",
"iterable",
",",
"gather",
"=",
"gather",
",",
"edit_distance",
"=",
"0",
",",
"max_edit_distance",
"=... | Call the lookup on the root node with the given parameters.
Args
iterable(index or key): Used to retrive nodes from tree
gather(bool): this is passed down to the root node lookup
Notes:
max_edit_distance and match_threshold come from the init | [
"Call",
"the",
"lookup",
"on",
"the",
"root",
"node",
"with",
"the",
"given",
"parameters",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/tools/text/trie.py#L177-L192 | train | 204,747 |
MycroftAI/adapt | adapt/tools/text/trie.py | Trie.insert | def insert(self, iterable, data=None, weight=1.0):
"""Used to insert into he root node
Args
iterable(hashable): index or key used to identify
data(object): data to be paired with the key
"""
self.root.insert(iterable, index=0, data=data, weight=1.0) | python | def insert(self, iterable, data=None, weight=1.0):
"""Used to insert into he root node
Args
iterable(hashable): index or key used to identify
data(object): data to be paired with the key
"""
self.root.insert(iterable, index=0, data=data, weight=1.0) | [
"def",
"insert",
"(",
"self",
",",
"iterable",
",",
"data",
"=",
"None",
",",
"weight",
"=",
"1.0",
")",
":",
"self",
".",
"root",
".",
"insert",
"(",
"iterable",
",",
"index",
"=",
"0",
",",
"data",
"=",
"data",
",",
"weight",
"=",
"1.0",
")"
] | Used to insert into he root node
Args
iterable(hashable): index or key used to identify
data(object): data to be paired with the key | [
"Used",
"to",
"insert",
"into",
"he",
"root",
"node"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/tools/text/trie.py#L194-L201 | train | 204,748 |
MycroftAI/adapt | adapt/tools/text/trie.py | Trie.remove | def remove(self, iterable, data=None):
"""Used to remove from the root node
Args:
iterable(hashable): index or key used to identify
item to remove
data: data to be paired with the key
"""
return self.root.remove(iterable, data=data) | python | def remove(self, iterable, data=None):
"""Used to remove from the root node
Args:
iterable(hashable): index or key used to identify
item to remove
data: data to be paired with the key
"""
return self.root.remove(iterable, data=data) | [
"def",
"remove",
"(",
"self",
",",
"iterable",
",",
"data",
"=",
"None",
")",
":",
"return",
"self",
".",
"root",
".",
"remove",
"(",
"iterable",
",",
"data",
"=",
"data",
")"
] | Used to remove from the root node
Args:
iterable(hashable): index or key used to identify
item to remove
data: data to be paired with the key | [
"Used",
"to",
"remove",
"from",
"the",
"root",
"node"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/tools/text/trie.py#L203-L211 | train | 204,749 |
MycroftAI/adapt | adapt/expander.py | bronk | def bronk(r, p, x, graph):
"""This is used to fine cliques and remove them from graph
Args:
graph (graph): this is the graph of verticies to search for
cliques
p (list): this is a list of the verticies to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
list : found clique of the given graph and verticies
"""
if len(p) == 0 and len(x) == 0:
yield r
return
for vertex in p[:]:
r_new = r[::]
r_new.append(vertex)
p_new = [val for val in p if val in graph.get_neighbors_of(vertex)] # p intersects N(vertex)
x_new = [val for val in x if val in graph.get_neighbors_of(vertex)] # x intersects N(vertex)
for result in bronk(r_new, p_new, x_new, graph):
yield result
p.remove(vertex)
x.append(vertex) | python | def bronk(r, p, x, graph):
"""This is used to fine cliques and remove them from graph
Args:
graph (graph): this is the graph of verticies to search for
cliques
p (list): this is a list of the verticies to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
list : found clique of the given graph and verticies
"""
if len(p) == 0 and len(x) == 0:
yield r
return
for vertex in p[:]:
r_new = r[::]
r_new.append(vertex)
p_new = [val for val in p if val in graph.get_neighbors_of(vertex)] # p intersects N(vertex)
x_new = [val for val in x if val in graph.get_neighbors_of(vertex)] # x intersects N(vertex)
for result in bronk(r_new, p_new, x_new, graph):
yield result
p.remove(vertex)
x.append(vertex) | [
"def",
"bronk",
"(",
"r",
",",
"p",
",",
"x",
",",
"graph",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"0",
"and",
"len",
"(",
"x",
")",
"==",
"0",
":",
"yield",
"r",
"return",
"for",
"vertex",
"in",
"p",
"[",
":",
"]",
":",
"r_new",
"="... | This is used to fine cliques and remove them from graph
Args:
graph (graph): this is the graph of verticies to search for
cliques
p (list): this is a list of the verticies to search
r (list): used by bronk for the search
x (list): used by bronk for the search
Yields:
list : found clique of the given graph and verticies | [
"This",
"is",
"used",
"to",
"fine",
"cliques",
"and",
"remove",
"them",
"from",
"graph"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/expander.py#L79-L103 | train | 204,750 |
MycroftAI/adapt | adapt/expander.py | graph_key_from_tag | def graph_key_from_tag(tag, entity_index):
"""Returns a key from a tag entity
Args:
tag (tag) : this is the tag selected to get the key from
entity_index (int) : this is the index of the tagged entity
Returns:
str : String representing the key for the given tagged entity.
"""
start_token = tag.get('start_token')
entity = tag.get('entities', [])[entity_index]
return str(start_token) + '-' + entity.get('key') + '-' + str(entity.get('confidence')) | python | def graph_key_from_tag(tag, entity_index):
"""Returns a key from a tag entity
Args:
tag (tag) : this is the tag selected to get the key from
entity_index (int) : this is the index of the tagged entity
Returns:
str : String representing the key for the given tagged entity.
"""
start_token = tag.get('start_token')
entity = tag.get('entities', [])[entity_index]
return str(start_token) + '-' + entity.get('key') + '-' + str(entity.get('confidence')) | [
"def",
"graph_key_from_tag",
"(",
"tag",
",",
"entity_index",
")",
":",
"start_token",
"=",
"tag",
".",
"get",
"(",
"'start_token'",
")",
"entity",
"=",
"tag",
".",
"get",
"(",
"'entities'",
",",
"[",
"]",
")",
"[",
"entity_index",
"]",
"return",
"str",
... | Returns a key from a tag entity
Args:
tag (tag) : this is the tag selected to get the key from
entity_index (int) : this is the index of the tagged entity
Returns:
str : String representing the key for the given tagged entity. | [
"Returns",
"a",
"key",
"from",
"a",
"tag",
"entity"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/expander.py#L120-L132 | train | 204,751 |
MycroftAI/adapt | adapt/expander.py | SimpleGraph.add_edge | def add_edge(self, a, b):
"""Used to add edges to the graph. 'a' and 'b' are vertexes and
if 'a' or 'b' doesn't exisit then the vertex is created
Args:
a (hash): is one vertex of the edge
b (hash): is another vertext of the edge
"""
neighbors_of_a = self.adjacency_lists.get(a)
if not neighbors_of_a:
neighbors_of_a = set()
self.adjacency_lists[a] = neighbors_of_a
neighbors_of_a.add(b)
neighbors_of_b = self.adjacency_lists.get(b)
if not neighbors_of_b:
neighbors_of_b = set()
self.adjacency_lists[b] = neighbors_of_b
neighbors_of_b.add(a) | python | def add_edge(self, a, b):
"""Used to add edges to the graph. 'a' and 'b' are vertexes and
if 'a' or 'b' doesn't exisit then the vertex is created
Args:
a (hash): is one vertex of the edge
b (hash): is another vertext of the edge
"""
neighbors_of_a = self.adjacency_lists.get(a)
if not neighbors_of_a:
neighbors_of_a = set()
self.adjacency_lists[a] = neighbors_of_a
neighbors_of_a.add(b)
neighbors_of_b = self.adjacency_lists.get(b)
if not neighbors_of_b:
neighbors_of_b = set()
self.adjacency_lists[b] = neighbors_of_b
neighbors_of_b.add(a) | [
"def",
"add_edge",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"neighbors_of_a",
"=",
"self",
".",
"adjacency_lists",
".",
"get",
"(",
"a",
")",
"if",
"not",
"neighbors_of_a",
":",
"neighbors_of_a",
"=",
"set",
"(",
")",
"self",
".",
"adjacency_lists",
... | Used to add edges to the graph. 'a' and 'b' are vertexes and
if 'a' or 'b' doesn't exisit then the vertex is created
Args:
a (hash): is one vertex of the edge
b (hash): is another vertext of the edge | [
"Used",
"to",
"add",
"edges",
"to",
"the",
"graph",
".",
"a",
"and",
"b",
"are",
"vertexes",
"and",
"if",
"a",
"or",
"b",
"doesn",
"t",
"exisit",
"then",
"the",
"vertex",
"is",
"created"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/expander.py#L32-L54 | train | 204,752 |
MycroftAI/adapt | adapt/expander.py | Lattice.append | def append(self, data):
"""Appends items or lists to the Lattice
Args:
data (item,list) : The Item or List to be added to the Lattice
"""
if isinstance(data, list) and len(data) > 0:
self.nodes.append(data)
else:
self.nodes.append([data]) | python | def append(self, data):
"""Appends items or lists to the Lattice
Args:
data (item,list) : The Item or List to be added to the Lattice
"""
if isinstance(data, list) and len(data) > 0:
self.nodes.append(data)
else:
self.nodes.append([data]) | [
"def",
"append",
"(",
"self",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
"and",
"len",
"(",
"data",
")",
">",
"0",
":",
"self",
".",
"nodes",
".",
"append",
"(",
"data",
")",
"else",
":",
"self",
".",
"nodes",
"."... | Appends items or lists to the Lattice
Args:
data (item,list) : The Item or List to be added to the Lattice | [
"Appends",
"items",
"or",
"lists",
"to",
"the",
"Lattice"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/expander.py#L148-L157 | train | 204,753 |
MycroftAI/adapt | adapt/expander.py | Lattice.traverse | def traverse(self, index=0):
""" This is used to produce a list of lists where each each item
in that list is a diffrent combination of items from the lists
within with every combination of such values.
Args:
index (int) : the index at witch to start the list.
Note this is used only in the function as a processing
Returns:
list : is every combination.
"""
if index < len(self.nodes):
for entity in self.nodes[index]:
for next_result in self.traverse(index=index+1):
if isinstance(entity, list):
yield entity + next_result
else:
yield [entity] + next_result
else:
yield [] | python | def traverse(self, index=0):
""" This is used to produce a list of lists where each each item
in that list is a diffrent combination of items from the lists
within with every combination of such values.
Args:
index (int) : the index at witch to start the list.
Note this is used only in the function as a processing
Returns:
list : is every combination.
"""
if index < len(self.nodes):
for entity in self.nodes[index]:
for next_result in self.traverse(index=index+1):
if isinstance(entity, list):
yield entity + next_result
else:
yield [entity] + next_result
else:
yield [] | [
"def",
"traverse",
"(",
"self",
",",
"index",
"=",
"0",
")",
":",
"if",
"index",
"<",
"len",
"(",
"self",
".",
"nodes",
")",
":",
"for",
"entity",
"in",
"self",
".",
"nodes",
"[",
"index",
"]",
":",
"for",
"next_result",
"in",
"self",
".",
"trave... | This is used to produce a list of lists where each each item
in that list is a diffrent combination of items from the lists
within with every combination of such values.
Args:
index (int) : the index at witch to start the list.
Note this is used only in the function as a processing
Returns:
list : is every combination. | [
"This",
"is",
"used",
"to",
"produce",
"a",
"list",
"of",
"lists",
"where",
"each",
"each",
"item",
"in",
"that",
"list",
"is",
"a",
"diffrent",
"combination",
"of",
"items",
"from",
"the",
"lists",
"within",
"with",
"every",
"combination",
"of",
"such",
... | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/expander.py#L159-L179 | train | 204,754 |
MycroftAI/adapt | adapt/expander.py | BronKerboschExpander._build_graph | def _build_graph(self, tags):
"""Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities.
"""
graph = SimpleGraph()
for tag_index in xrange(len(tags)):
for entity_index in xrange(len(tags[tag_index].get('entities'))):
a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)
tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))
for tag in tags[tag_index + 1:]:
start_token = tag.get('start_token')
if start_token >= tags[tag_index].get('start_token') + len(tokens):
for b_entity_index in xrange(len(tag.get('entities'))):
b_entity_name = graph_key_from_tag(tag, b_entity_index)
graph.add_edge(a_entity_name, b_entity_name)
return graph | python | def _build_graph(self, tags):
"""Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities.
"""
graph = SimpleGraph()
for tag_index in xrange(len(tags)):
for entity_index in xrange(len(tags[tag_index].get('entities'))):
a_entity_name = graph_key_from_tag(tags[tag_index], entity_index)
tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match'))
for tag in tags[tag_index + 1:]:
start_token = tag.get('start_token')
if start_token >= tags[tag_index].get('start_token') + len(tokens):
for b_entity_index in xrange(len(tag.get('entities'))):
b_entity_name = graph_key_from_tag(tag, b_entity_index)
graph.add_edge(a_entity_name, b_entity_name)
return graph | [
"def",
"_build_graph",
"(",
"self",
",",
"tags",
")",
":",
"graph",
"=",
"SimpleGraph",
"(",
")",
"for",
"tag_index",
"in",
"xrange",
"(",
"len",
"(",
"tags",
")",
")",
":",
"for",
"entity_index",
"in",
"xrange",
"(",
"len",
"(",
"tags",
"[",
"tag_in... | Builds a graph from the entities included in the tags.
Note this is used internally.
Args:
tags (list): A list of the tags to include in graph
Returns:
graph : this is the resulting graph of the tagged entities. | [
"Builds",
"a",
"graph",
"from",
"the",
"entities",
"included",
"in",
"the",
"tags",
".",
"Note",
"this",
"is",
"used",
"internally",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/expander.py#L202-L225 | train | 204,755 |
MycroftAI/adapt | adapt/expander.py | BronKerboschExpander._sub_expand | def _sub_expand(self, tags):
"""This called by expand to find cliques
Args:
tags (list): a list of the tags used to get cliques
Yields:
list : list of sorted tags by start_token this is a clique
"""
entities = {}
graph = self._build_graph(tags)
# name entities
for tag in tags:
for entity_index in xrange(len(tag.get('entities'))):
node_name = graph_key_from_tag(tag, entity_index)
if not node_name in entities:
entities[node_name] = []
entities[node_name] += [
tag.get('entities', [])[entity_index],
tag.get('entities', [])[entity_index].get('confidence'),
tag
]
for clique in get_cliques(list(entities), graph):
result = []
for entity_name in clique:
start_token = int(entity_name.split("-")[0])
old_tag = entities[entity_name][2]
tag = {
'start_token': start_token,
'entities': [entities.get(entity_name)[0]],
'confidence': entities.get(entity_name)[1] * old_tag.get('confidence', 1.0),
'end_token': old_tag.get('end_token'),
'match': old_tag.get('entities')[0].get('match'),
'key': old_tag.get('entities')[0].get('key'),
'from_context': old_tag.get('from_context', False)
}
result.append(tag)
result = sorted(result, key=lambda e: e.get('start_token'))
yield result | python | def _sub_expand(self, tags):
"""This called by expand to find cliques
Args:
tags (list): a list of the tags used to get cliques
Yields:
list : list of sorted tags by start_token this is a clique
"""
entities = {}
graph = self._build_graph(tags)
# name entities
for tag in tags:
for entity_index in xrange(len(tag.get('entities'))):
node_name = graph_key_from_tag(tag, entity_index)
if not node_name in entities:
entities[node_name] = []
entities[node_name] += [
tag.get('entities', [])[entity_index],
tag.get('entities', [])[entity_index].get('confidence'),
tag
]
for clique in get_cliques(list(entities), graph):
result = []
for entity_name in clique:
start_token = int(entity_name.split("-")[0])
old_tag = entities[entity_name][2]
tag = {
'start_token': start_token,
'entities': [entities.get(entity_name)[0]],
'confidence': entities.get(entity_name)[1] * old_tag.get('confidence', 1.0),
'end_token': old_tag.get('end_token'),
'match': old_tag.get('entities')[0].get('match'),
'key': old_tag.get('entities')[0].get('key'),
'from_context': old_tag.get('from_context', False)
}
result.append(tag)
result = sorted(result, key=lambda e: e.get('start_token'))
yield result | [
"def",
"_sub_expand",
"(",
"self",
",",
"tags",
")",
":",
"entities",
"=",
"{",
"}",
"graph",
"=",
"self",
".",
"_build_graph",
"(",
"tags",
")",
"# name entities",
"for",
"tag",
"in",
"tags",
":",
"for",
"entity_index",
"in",
"xrange",
"(",
"len",
"("... | This called by expand to find cliques
Args:
tags (list): a list of the tags used to get cliques
Yields:
list : list of sorted tags by start_token this is a clique | [
"This",
"called",
"by",
"expand",
"to",
"find",
"cliques"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/expander.py#L227-L267 | train | 204,756 |
MycroftAI/adapt | adapt/expander.py | BronKerboschExpander.expand | def expand(self, tags, clique_scoring_func=None):
"""This is the main function to expand tags into cliques
Args:
tags (list): a list of tags to find the cliques.
clique_scoring_func (func): a function that returns a float
value for the clique
Returns:
list : a list of cliques
"""
lattice = Lattice()
overlapping_spans = []
def end_token_index():
return max([t.get('end_token') for t in overlapping_spans])
for i in xrange(len(tags)):
tag = tags[i]
if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):
overlapping_spans.append(tag)
elif len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
overlapping_spans = [tag]
else:
lattice.append(overlapping_spans)
overlapping_spans = [tag]
if len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
else:
lattice.append(overlapping_spans)
return lattice.traverse() | python | def expand(self, tags, clique_scoring_func=None):
"""This is the main function to expand tags into cliques
Args:
tags (list): a list of tags to find the cliques.
clique_scoring_func (func): a function that returns a float
value for the clique
Returns:
list : a list of cliques
"""
lattice = Lattice()
overlapping_spans = []
def end_token_index():
return max([t.get('end_token') for t in overlapping_spans])
for i in xrange(len(tags)):
tag = tags[i]
if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):
overlapping_spans.append(tag)
elif len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
overlapping_spans = [tag]
else:
lattice.append(overlapping_spans)
overlapping_spans = [tag]
if len(overlapping_spans) > 1:
cliques = list(self._sub_expand(overlapping_spans))
if clique_scoring_func:
cliques = sorted(cliques, key=lambda e: -1 * clique_scoring_func(e))
lattice.append(cliques)
else:
lattice.append(overlapping_spans)
return lattice.traverse() | [
"def",
"expand",
"(",
"self",
",",
"tags",
",",
"clique_scoring_func",
"=",
"None",
")",
":",
"lattice",
"=",
"Lattice",
"(",
")",
"overlapping_spans",
"=",
"[",
"]",
"def",
"end_token_index",
"(",
")",
":",
"return",
"max",
"(",
"[",
"t",
".",
"get",
... | This is the main function to expand tags into cliques
Args:
tags (list): a list of tags to find the cliques.
clique_scoring_func (func): a function that returns a float
value for the clique
Returns:
list : a list of cliques | [
"This",
"is",
"the",
"main",
"function",
"to",
"expand",
"tags",
"into",
"cliques"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/expander.py#L269-L308 | train | 204,757 |
MycroftAI/adapt | adapt/entity_tagger.py | EntityTagger._iterate_subsequences | def _iterate_subsequences(self, tokens):
"""
Using regex invokes this function, which significantly impacts performance of adapt. it is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ?
"""
for start_idx in xrange(len(tokens)):
for end_idx in xrange(start_idx + 1, len(tokens) + 1):
yield ' '.join(tokens[start_idx:end_idx]), start_idx | python | def _iterate_subsequences(self, tokens):
"""
Using regex invokes this function, which significantly impacts performance of adapt. it is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ?
"""
for start_idx in xrange(len(tokens)):
for end_idx in xrange(start_idx + 1, len(tokens) + 1):
yield ' '.join(tokens[start_idx:end_idx]), start_idx | [
"def",
"_iterate_subsequences",
"(",
"self",
",",
"tokens",
")",
":",
"for",
"start_idx",
"in",
"xrange",
"(",
"len",
"(",
"tokens",
")",
")",
":",
"for",
"end_idx",
"in",
"xrange",
"(",
"start_idx",
"+",
"1",
",",
"len",
"(",
"tokens",
")",
"+",
"1"... | Using regex invokes this function, which significantly impacts performance of adapt. it is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ? | [
"Using",
"regex",
"invokes",
"this",
"function",
"which",
"significantly",
"impacts",
"performance",
"of",
"adapt",
".",
"it",
"is",
"an",
"N!",
"operation",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/entity_tagger.py#L33-L45 | train | 204,758 |
MycroftAI/adapt | adapt/engine.py | IntentDeterminationEngine.__best_intent | def __best_intent(self, parse_result, context=[]):
"""
Decide the best intent
Args:
parse_result(list): results used to match the best intent.
context(list): ?
Returns:
best_intent, best_tags:
best_intent : The best intent for given results
best_tags : The Tags for result
"""
best_intent = None
best_tags = None
context_as_entities = [{'entities': [c]} for c in context]
for intent in self.intent_parsers:
i, tags = intent.validate_with_tags(parse_result.get('tags') + context_as_entities, parse_result.get('confidence'))
if not best_intent or (i and i.get('confidence') > best_intent.get('confidence')):
best_intent = i
best_tags = tags
return best_intent, best_tags | python | def __best_intent(self, parse_result, context=[]):
"""
Decide the best intent
Args:
parse_result(list): results used to match the best intent.
context(list): ?
Returns:
best_intent, best_tags:
best_intent : The best intent for given results
best_tags : The Tags for result
"""
best_intent = None
best_tags = None
context_as_entities = [{'entities': [c]} for c in context]
for intent in self.intent_parsers:
i, tags = intent.validate_with_tags(parse_result.get('tags') + context_as_entities, parse_result.get('confidence'))
if not best_intent or (i and i.get('confidence') > best_intent.get('confidence')):
best_intent = i
best_tags = tags
return best_intent, best_tags | [
"def",
"__best_intent",
"(",
"self",
",",
"parse_result",
",",
"context",
"=",
"[",
"]",
")",
":",
"best_intent",
"=",
"None",
"best_tags",
"=",
"None",
"context_as_entities",
"=",
"[",
"{",
"'entities'",
":",
"[",
"c",
"]",
"}",
"for",
"c",
"in",
"con... | Decide the best intent
Args:
parse_result(list): results used to match the best intent.
context(list): ?
Returns:
best_intent, best_tags:
best_intent : The best intent for given results
best_tags : The Tags for result | [
"Decide",
"the",
"best",
"intent"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L56-L78 | train | 204,759 |
MycroftAI/adapt | adapt/engine.py | IntentDeterminationEngine.__get_unused_context | def __get_unused_context(self, parse_result, context):
""" Used to get unused context from context. Any keys not in
parse_result
Args:
parse_results(list): parsed results used to identify what keys
in the context are used.
context(list): this is the context used to match with parsed results
keys missing in the parsed results are the unused context
Returns:
list: A list of the unused context results.
"""
tags_keys = set([t['key'] for t in parse_result['tags'] if t['from_context']])
result_context = [c for c in context if c['key'] not in tags_keys]
return result_context | python | def __get_unused_context(self, parse_result, context):
""" Used to get unused context from context. Any keys not in
parse_result
Args:
parse_results(list): parsed results used to identify what keys
in the context are used.
context(list): this is the context used to match with parsed results
keys missing in the parsed results are the unused context
Returns:
list: A list of the unused context results.
"""
tags_keys = set([t['key'] for t in parse_result['tags'] if t['from_context']])
result_context = [c for c in context if c['key'] not in tags_keys]
return result_context | [
"def",
"__get_unused_context",
"(",
"self",
",",
"parse_result",
",",
"context",
")",
":",
"tags_keys",
"=",
"set",
"(",
"[",
"t",
"[",
"'key'",
"]",
"for",
"t",
"in",
"parse_result",
"[",
"'tags'",
"]",
"if",
"t",
"[",
"'from_context'",
"]",
"]",
")",... | Used to get unused context from context. Any keys not in
parse_result
Args:
parse_results(list): parsed results used to identify what keys
in the context are used.
context(list): this is the context used to match with parsed results
keys missing in the parsed results are the unused context
Returns:
list: A list of the unused context results. | [
"Used",
"to",
"get",
"unused",
"context",
"from",
"context",
".",
"Any",
"keys",
"not",
"in",
"parse_result"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L80-L95 | train | 204,760 |
MycroftAI/adapt | adapt/engine.py | IntentDeterminationEngine.register_entity | def register_entity(self, entity_value, entity_type, alias_of=None):
"""
Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
"""
if alias_of:
self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))
else:
self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))
self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept')) | python | def register_entity(self, entity_value, entity_type, alias_of=None):
"""
Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
"""
if alias_of:
self.trie.insert(entity_value.lower(), data=(alias_of, entity_type))
else:
self.trie.insert(entity_value.lower(), data=(entity_value, entity_type))
self.trie.insert(entity_type.lower(), data=(entity_type, 'Concept')) | [
"def",
"register_entity",
"(",
"self",
",",
"entity_value",
",",
"entity_type",
",",
"alias_of",
"=",
"None",
")",
":",
"if",
"alias_of",
":",
"self",
".",
"trie",
".",
"insert",
"(",
"entity_value",
".",
"lower",
"(",
")",
",",
"data",
"=",
"(",
"alia... | Register an entity to be tagged in potential parse results
Args:
entity_value(str): the value/proper name of an entity instance (Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show") | [
"Register",
"an",
"entity",
"to",
"be",
"tagged",
"in",
"potential",
"parse",
"results"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L129-L141 | train | 204,761 |
MycroftAI/adapt | adapt/engine.py | IntentDeterminationEngine.register_intent_parser | def register_intent_parser(self, intent_parser):
"""
"Enforce" the intent parser interface at registration time.
Args:
intent_parser(intent): Intent to be registered.
Raises:
ValueError: on invalid intent
"""
if hasattr(intent_parser, 'validate') and callable(intent_parser.validate):
self.intent_parsers.append(intent_parser)
else:
raise ValueError("%s is not an intent parser" % str(intent_parser)) | python | def register_intent_parser(self, intent_parser):
"""
"Enforce" the intent parser interface at registration time.
Args:
intent_parser(intent): Intent to be registered.
Raises:
ValueError: on invalid intent
"""
if hasattr(intent_parser, 'validate') and callable(intent_parser.validate):
self.intent_parsers.append(intent_parser)
else:
raise ValueError("%s is not an intent parser" % str(intent_parser)) | [
"def",
"register_intent_parser",
"(",
"self",
",",
"intent_parser",
")",
":",
"if",
"hasattr",
"(",
"intent_parser",
",",
"'validate'",
")",
"and",
"callable",
"(",
"intent_parser",
".",
"validate",
")",
":",
"self",
".",
"intent_parsers",
".",
"append",
"(",
... | "Enforce" the intent parser interface at registration time.
Args:
intent_parser(intent): Intent to be registered.
Raises:
ValueError: on invalid intent | [
"Enforce",
"the",
"intent",
"parser",
"interface",
"at",
"registration",
"time",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L155-L168 | train | 204,762 |
MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine.tokenizer | def tokenizer(self):
"""
A property to link into IntentEngine's tokenizer.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Return: the domains tokenizer from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].tokenizer | python | def tokenizer(self):
"""
A property to link into IntentEngine's tokenizer.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Return: the domains tokenizer from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].tokenizer | [
"def",
"tokenizer",
"(",
"self",
")",
":",
"domain",
"=",
"0",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",
"domain",
")",
"return",
"self",
".",
"domains",
"[",
"domain",
"]",
".",
"to... | A property to link into IntentEngine's tokenizer.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Return: the domains tokenizer from its IntentEngine | [
"A",
"property",
"to",
"link",
"into",
"IntentEngine",
"s",
"tokenizer",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L198-L210 | train | 204,763 |
MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine.trie | def trie(self):
"""
A property to link into IntentEngine's trie.
warning:: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Return: the domains trie from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].trie | python | def trie(self):
"""
A property to link into IntentEngine's trie.
warning:: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Return: the domains trie from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].trie | [
"def",
"trie",
"(",
"self",
")",
":",
"domain",
"=",
"0",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",
"domain",
")",
"return",
"self",
".",
"domains",
"[",
"domain",
"]",
".",
"trie"
] | A property to link into IntentEngine's trie.
warning:: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Return: the domains trie from its IntentEngine | [
"A",
"property",
"to",
"link",
"into",
"IntentEngine",
"s",
"trie",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L213-L225 | train | 204,764 |
MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine._regex_strings | def _regex_strings(self):
"""
A property to link into IntentEngine's _regex_strings.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Returns: the domains _regex_strings from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain]._regex_strings | python | def _regex_strings(self):
"""
A property to link into IntentEngine's _regex_strings.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Returns: the domains _regex_strings from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain]._regex_strings | [
"def",
"_regex_strings",
"(",
"self",
")",
":",
"domain",
"=",
"0",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",
"domain",
")",
"return",
"self",
".",
"domains",
"[",
"domain",
"]",
".",
... | A property to link into IntentEngine's _regex_strings.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Returns: the domains _regex_strings from its IntentEngine | [
"A",
"property",
"to",
"link",
"into",
"IntentEngine",
"s",
"_regex_strings",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L258-L270 | train | 204,765 |
MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine.regular_expressions_entities | def regular_expressions_entities(self):
"""
A property to link into IntentEngine's regular_expressions_entities.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Returns: the domains regular_expression_entities from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].regular_expressions_entities | python | def regular_expressions_entities(self):
"""
A property to link into IntentEngine's regular_expressions_entities.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Returns: the domains regular_expression_entities from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain].regular_expressions_entities | [
"def",
"regular_expressions_entities",
"(",
"self",
")",
":",
"domain",
"=",
"0",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",
"domain",
")",
"return",
"self",
".",
"domains",
"[",
"domain",
... | A property to link into IntentEngine's regular_expressions_entities.
Warning: this is only for backwards compatiblility and should not be used if you
intend on using domains.
Returns: the domains regular_expression_entities from its IntentEngine | [
"A",
"property",
"to",
"link",
"into",
"IntentEngine",
"s",
"regular_expressions_entities",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L273-L285 | train | 204,766 |
MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine.register_domain | def register_domain(self, domain=0, tokenizer=None, trie=None):
"""
Register a domain with the intent engine.
Args:
tokenizer(tokenizer): The tokenizer you wish to use.
trie(Trie): the Trie() you wish to use.
domain(str): a string representing the domain you wish to add
"""
self.domains[domain] = IntentDeterminationEngine(
tokenizer=tokenizer, trie=trie) | python | def register_domain(self, domain=0, tokenizer=None, trie=None):
"""
Register a domain with the intent engine.
Args:
tokenizer(tokenizer): The tokenizer you wish to use.
trie(Trie): the Trie() you wish to use.
domain(str): a string representing the domain you wish to add
"""
self.domains[domain] = IntentDeterminationEngine(
tokenizer=tokenizer, trie=trie) | [
"def",
"register_domain",
"(",
"self",
",",
"domain",
"=",
"0",
",",
"tokenizer",
"=",
"None",
",",
"trie",
"=",
"None",
")",
":",
"self",
".",
"domains",
"[",
"domain",
"]",
"=",
"IntentDeterminationEngine",
"(",
"tokenizer",
"=",
"tokenizer",
",",
"tri... | Register a domain with the intent engine.
Args:
tokenizer(tokenizer): The tokenizer you wish to use.
trie(Trie): the Trie() you wish to use.
domain(str): a string representing the domain you wish to add | [
"Register",
"a",
"domain",
"with",
"the",
"intent",
"engine",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L287-L297 | train | 204,767 |
MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine.register_entity | def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):
"""
Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_entity(entity_value=entity_value,
entity_type=entity_type,
alias_of=alias_of) | python | def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):
"""
Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_entity(entity_value=entity_value,
entity_type=entity_type,
alias_of=alias_of) | [
"def",
"register_entity",
"(",
"self",
",",
"entity_value",
",",
"entity_type",
",",
"alias_of",
"=",
"None",
",",
"domain",
"=",
"0",
")",
":",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",... | Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to | [
"Register",
"an",
"entity",
"to",
"be",
"tagged",
"in",
"potential",
"parse",
"results",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L299-L313 | train | 204,768 |
MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine.register_intent_parser | def register_intent_parser(self, intent_parser, domain=0):
"""
Register a intent parser with a domain.
Args:
intent_parser(intent): The intent parser you wish to register.
domain(str): a string representing the domain you wish register the intent
parser to.
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_intent_parser(
intent_parser=intent_parser) | python | def register_intent_parser(self, intent_parser, domain=0):
"""
Register a intent parser with a domain.
Args:
intent_parser(intent): The intent parser you wish to register.
domain(str): a string representing the domain you wish register the intent
parser to.
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_intent_parser(
intent_parser=intent_parser) | [
"def",
"register_intent_parser",
"(",
"self",
",",
"intent_parser",
",",
"domain",
"=",
"0",
")",
":",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",
"domain",
")",
"self",
".",
"domains",
"[... | Register a intent parser with a domain.
Args:
intent_parser(intent): The intent parser you wish to register.
domain(str): a string representing the domain you wish register the intent
parser to. | [
"Register",
"a",
"intent",
"parser",
"with",
"a",
"domain",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L350-L362 | train | 204,769 |
MycroftAI/adapt | adapt/tools/text/tokenizer.py | EnglishTokenizer.tokenize | def tokenize(self, string):
"""Used to parce a string into tokens
This function is to take in a string and return a list of tokens
Args:
string(str): This is a string of words or a sentance to be parsed into tokens
Returns:
list: a list of tokens from the string passed in.
Notes:
Doesn't seem to parse contractions correctly for example don't
would parse as two tokens 'do' and "n't" and this seems to be not
what we would want. Maybe should be "don't" or maybe contractions
should be expanded into "do not" or "do","not". This could be
done with a contraction dictionary and some preprocessing.
"""
s = string
s = re.sub('\t', " ", s)
s = re.sub("(" + regex_separator + ")", " \g<1> ", s)
s = re.sub("([^0-9]),", "\g<1> , ", s)
s = re.sub(",([^0-9])", " , \g<1>", s)
s = re.sub("^(')", "\g<1> ", s)
s = re.sub("(" + regex_not_letter_number + ")'", "\g<1> '", s)
s = re.sub("(" + regex_clitics + ")$", " \g<1>", s)
s = re.sub("(" + regex_clitics + ")(" + regex_not_letter_number + ")", " \g<1> \g<2>", s)
words = s.strip().split()
p1 = re.compile(".*" + regex_letter_number + "\\.")
p2 = re.compile("^([A-Za-z]\\.([A-Za-z]\\.)+|[A-Z][bcdfghj-nptvxz]+\\.)$")
token_list = []
for word in words:
m1 = p1.match(word)
m2 = p2.match(word)
if m1 and word not in abbreviations_list and not m2:
token_list.append(word[0: word.find('.')])
token_list.append(word[word.find('.')])
else:
token_list.append(word)
return token_list | python | def tokenize(self, string):
"""Used to parce a string into tokens
This function is to take in a string and return a list of tokens
Args:
string(str): This is a string of words or a sentance to be parsed into tokens
Returns:
list: a list of tokens from the string passed in.
Notes:
Doesn't seem to parse contractions correctly for example don't
would parse as two tokens 'do' and "n't" and this seems to be not
what we would want. Maybe should be "don't" or maybe contractions
should be expanded into "do not" or "do","not". This could be
done with a contraction dictionary and some preprocessing.
"""
s = string
s = re.sub('\t', " ", s)
s = re.sub("(" + regex_separator + ")", " \g<1> ", s)
s = re.sub("([^0-9]),", "\g<1> , ", s)
s = re.sub(",([^0-9])", " , \g<1>", s)
s = re.sub("^(')", "\g<1> ", s)
s = re.sub("(" + regex_not_letter_number + ")'", "\g<1> '", s)
s = re.sub("(" + regex_clitics + ")$", " \g<1>", s)
s = re.sub("(" + regex_clitics + ")(" + regex_not_letter_number + ")", " \g<1> \g<2>", s)
words = s.strip().split()
p1 = re.compile(".*" + regex_letter_number + "\\.")
p2 = re.compile("^([A-Za-z]\\.([A-Za-z]\\.)+|[A-Z][bcdfghj-nptvxz]+\\.)$")
token_list = []
for word in words:
m1 = p1.match(word)
m2 = p2.match(word)
if m1 and word not in abbreviations_list and not m2:
token_list.append(word[0: word.find('.')])
token_list.append(word[word.find('.')])
else:
token_list.append(word)
return token_list | [
"def",
"tokenize",
"(",
"self",
",",
"string",
")",
":",
"s",
"=",
"string",
"s",
"=",
"re",
".",
"sub",
"(",
"'\\t'",
",",
"\" \"",
",",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"\"(\"",
"+",
"regex_separator",
"+",
"\")\"",
",",
"\" \\g<1> \"... | Used to parce a string into tokens
This function is to take in a string and return a list of tokens
Args:
string(str): This is a string of words or a sentance to be parsed into tokens
Returns:
list: a list of tokens from the string passed in.
Notes:
Doesn't seem to parse contractions correctly for example don't
would parse as two tokens 'do' and "n't" and this seems to be not
what we would want. Maybe should be "don't" or maybe contractions
should be expanded into "do not" or "do","not". This could be
done with a contraction dictionary and some preprocessing. | [
"Used",
"to",
"parce",
"a",
"string",
"into",
"tokens"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/tools/text/tokenizer.py#L38-L82 | train | 204,770 |
MycroftAI/adapt | adapt/parser.py | Parser.parse | def parse(self, utterance, context=None, N=1):
"""Used to find tags within utterance with a given confidence
Args:
utterance(str): conversational piece given by the user
context(list): a list of entities
N(int): number of results
Returns: yield an object with the following fields
utterance(str): the value passed in
tags(list) : a list of tags found in utterance
time(time) : duration since call of function
confidence(float) : float indicating how confident of a match to the
utterance. This might be used to determan the most likely intent.
"""
start = time.time()
context_trie = None
if context and isinstance(context, list):
# sort by confidence in ascending order, so
# highest confidence for an entity is last.
# see comment on TrieNode ctor
context.sort(key=lambda x: x.get('confidence'))
context_trie = Trie()
for entity in context:
entity_value, entity_type = entity.get('data')[0]
context_trie.insert(entity_value.lower(),
data=(entity_value, entity_type),
weight=entity.get('confidence'))
tagged = self._tagger.tag(utterance.lower(), context_trie=context_trie)
self.emit("tagged_entities",
{
'utterance': utterance,
'tags': list(tagged),
'time': time.time() - start
})
start = time.time()
bke = BronKerboschExpander(self._tokenizer)
def score_clique(clique):
score = 0.0
for tagged_entity in clique:
ec = tagged_entity.get('entities', [{'confidence': 0.0}])[0].get('confidence')
score += ec * len(tagged_entity.get('entities', [{'match': ''}])[0].get('match')) / (
len(utterance) + 1)
return score
parse_results = bke.expand(tagged, clique_scoring_func=score_clique)
count = 0
for result in parse_results:
count += 1
parse_confidence = 0.0
for tag in result:
sample_entity = tag['entities'][0]
entity_confidence = sample_entity.get('confidence', 0.0) * float(
len(sample_entity.get('match'))) / len(utterance)
parse_confidence += entity_confidence
yield {
'utterance': utterance,
'tags': result,
'time': time.time() - start,
'confidence': parse_confidence
}
if count >= N:
break | python | def parse(self, utterance, context=None, N=1):
"""Used to find tags within utterance with a given confidence
Args:
utterance(str): conversational piece given by the user
context(list): a list of entities
N(int): number of results
Returns: yield an object with the following fields
utterance(str): the value passed in
tags(list) : a list of tags found in utterance
time(time) : duration since call of function
confidence(float) : float indicating how confident of a match to the
utterance. This might be used to determan the most likely intent.
"""
start = time.time()
context_trie = None
if context and isinstance(context, list):
# sort by confidence in ascending order, so
# highest confidence for an entity is last.
# see comment on TrieNode ctor
context.sort(key=lambda x: x.get('confidence'))
context_trie = Trie()
for entity in context:
entity_value, entity_type = entity.get('data')[0]
context_trie.insert(entity_value.lower(),
data=(entity_value, entity_type),
weight=entity.get('confidence'))
tagged = self._tagger.tag(utterance.lower(), context_trie=context_trie)
self.emit("tagged_entities",
{
'utterance': utterance,
'tags': list(tagged),
'time': time.time() - start
})
start = time.time()
bke = BronKerboschExpander(self._tokenizer)
def score_clique(clique):
score = 0.0
for tagged_entity in clique:
ec = tagged_entity.get('entities', [{'confidence': 0.0}])[0].get('confidence')
score += ec * len(tagged_entity.get('entities', [{'match': ''}])[0].get('match')) / (
len(utterance) + 1)
return score
parse_results = bke.expand(tagged, clique_scoring_func=score_clique)
count = 0
for result in parse_results:
count += 1
parse_confidence = 0.0
for tag in result:
sample_entity = tag['entities'][0]
entity_confidence = sample_entity.get('confidence', 0.0) * float(
len(sample_entity.get('match'))) / len(utterance)
parse_confidence += entity_confidence
yield {
'utterance': utterance,
'tags': result,
'time': time.time() - start,
'confidence': parse_confidence
}
if count >= N:
break | [
"def",
"parse",
"(",
"self",
",",
"utterance",
",",
"context",
"=",
"None",
",",
"N",
"=",
"1",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"context_trie",
"=",
"None",
"if",
"context",
"and",
"isinstance",
"(",
"context",
",",
"list",
"... | Used to find tags within utterance with a given confidence
Args:
utterance(str): conversational piece given by the user
context(list): a list of entities
N(int): number of results
Returns: yield an object with the following fields
utterance(str): the value passed in
tags(list) : a list of tags found in utterance
time(time) : duration since call of function
confidence(float) : float indicating how confident of a match to the
utterance. This might be used to determan the most likely intent. | [
"Used",
"to",
"find",
"tags",
"within",
"utterance",
"with",
"a",
"given",
"confidence"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/parser.py#L33-L99 | train | 204,771 |
MycroftAI/adapt | adapt/context.py | ContextManagerFrame.metadata_matches | def metadata_matches(self, query={}):
"""
Returns key matches to metadata
This will check every key in query for a matching key in metadata
returning true if every key is in metadata. query without keys
return false.
Args:
query(object): metadata for matching
Returns:
bool:
True: when key count in query is > 0 and all keys in query in
self.metadata
False: if key count in query is <= 0 or any key in query not
found in self.metadata
"""
result = len(query.keys()) > 0
for key in query.keys():
result = result and query[key] == self.metadata.get(key)
return result | python | def metadata_matches(self, query={}):
"""
Returns key matches to metadata
This will check every key in query for a matching key in metadata
returning true if every key is in metadata. query without keys
return false.
Args:
query(object): metadata for matching
Returns:
bool:
True: when key count in query is > 0 and all keys in query in
self.metadata
False: if key count in query is <= 0 or any key in query not
found in self.metadata
"""
result = len(query.keys()) > 0
for key in query.keys():
result = result and query[key] == self.metadata.get(key)
return result | [
"def",
"metadata_matches",
"(",
"self",
",",
"query",
"=",
"{",
"}",
")",
":",
"result",
"=",
"len",
"(",
"query",
".",
"keys",
"(",
")",
")",
">",
"0",
"for",
"key",
"in",
"query",
".",
"keys",
"(",
")",
":",
"result",
"=",
"result",
"and",
"q... | Returns key matches to metadata
This will check every key in query for a matching key in metadata
returning true if every key is in metadata. query without keys
return false.
Args:
query(object): metadata for matching
Returns:
bool:
True: when key count in query is > 0 and all keys in query in
self.metadata
False: if key count in query is <= 0 or any key in query not
found in self.metadata | [
"Returns",
"key",
"matches",
"to",
"metadata"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/context.py#L48-L71 | train | 204,772 |
MycroftAI/adapt | adapt/context.py | ContextManagerFrame.merge_context | def merge_context(self, tag, metadata):
"""
merge into contextManagerFrame new entity and metadata.
Appends tag as new entity and adds keys in metadata to keys in
self.metadata.
Args:
tag(str): entity to be added to self.entities
metadata(object): metadata containes keys to be added to self.metadata
"""
self.entities.append(tag)
for k in metadata.keys():
if k not in self.metadata:
self.metadata[k] = k | python | def merge_context(self, tag, metadata):
"""
merge into contextManagerFrame new entity and metadata.
Appends tag as new entity and adds keys in metadata to keys in
self.metadata.
Args:
tag(str): entity to be added to self.entities
metadata(object): metadata containes keys to be added to self.metadata
"""
self.entities.append(tag)
for k in metadata.keys():
if k not in self.metadata:
self.metadata[k] = k | [
"def",
"merge_context",
"(",
"self",
",",
"tag",
",",
"metadata",
")",
":",
"self",
".",
"entities",
".",
"append",
"(",
"tag",
")",
"for",
"k",
"in",
"metadata",
".",
"keys",
"(",
")",
":",
"if",
"k",
"not",
"in",
"self",
".",
"metadata",
":",
"... | merge into contextManagerFrame new entity and metadata.
Appends tag as new entity and adds keys in metadata to keys in
self.metadata.
Args:
tag(str): entity to be added to self.entities
metadata(object): metadata containes keys to be added to self.metadata | [
"merge",
"into",
"contextManagerFrame",
"new",
"entity",
"and",
"metadata",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/context.py#L73-L87 | train | 204,773 |
MycroftAI/adapt | adapt/context.py | ContextManager.get_context | def get_context(self, max_frames=None, missing_entities=[]):
"""
Constructs a list of entities from the context.
Args:
max_frames(int): maximum number of frames to look back
missing_entities(list of str): a list or set of tag names, as strings
Returns:
list: a list of entities
"""
if not max_frames or max_frames > len(self.frame_stack):
max_frames = len(self.frame_stack)
missing_entities = list(missing_entities)
context = []
for i in xrange(max_frames):
frame_entities = [entity.copy() for entity in self.frame_stack[i].entities]
for entity in frame_entities:
entity['confidence'] = entity.get('confidence', 1.0) / (2.0 + i)
context += frame_entities
result = []
if len(missing_entities) > 0:
for entity in context:
if entity.get('data') in missing_entities:
result.append(entity)
# NOTE: this implies that we will only ever get one
# of an entity kind from context, unless specified
# multiple times in missing_entities. Cannot get
# an arbitrary number of an entity kind.
missing_entities.remove(entity.get('data'))
else:
result = context
return result | python | def get_context(self, max_frames=None, missing_entities=[]):
"""
Constructs a list of entities from the context.
Args:
max_frames(int): maximum number of frames to look back
missing_entities(list of str): a list or set of tag names, as strings
Returns:
list: a list of entities
"""
if not max_frames or max_frames > len(self.frame_stack):
max_frames = len(self.frame_stack)
missing_entities = list(missing_entities)
context = []
for i in xrange(max_frames):
frame_entities = [entity.copy() for entity in self.frame_stack[i].entities]
for entity in frame_entities:
entity['confidence'] = entity.get('confidence', 1.0) / (2.0 + i)
context += frame_entities
result = []
if len(missing_entities) > 0:
for entity in context:
if entity.get('data') in missing_entities:
result.append(entity)
# NOTE: this implies that we will only ever get one
# of an entity kind from context, unless specified
# multiple times in missing_entities. Cannot get
# an arbitrary number of an entity kind.
missing_entities.remove(entity.get('data'))
else:
result = context
return result | [
"def",
"get_context",
"(",
"self",
",",
"max_frames",
"=",
"None",
",",
"missing_entities",
"=",
"[",
"]",
")",
":",
"if",
"not",
"max_frames",
"or",
"max_frames",
">",
"len",
"(",
"self",
".",
"frame_stack",
")",
":",
"max_frames",
"=",
"len",
"(",
"s... | Constructs a list of entities from the context.
Args:
max_frames(int): maximum number of frames to look back
missing_entities(list of str): a list or set of tag names, as strings
Returns:
list: a list of entities | [
"Constructs",
"a",
"list",
"of",
"entities",
"from",
"the",
"context",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/context.py#L116-L151 | train | 204,774 |
MycroftAI/adapt | adapt/intent.py | find_first_tag | def find_first_tag(tags, entity_type, after_index=-1):
"""Searches tags for entity type after given index
Args:
tags(list): a list of tags with entity types to be compaired too entity_type
entity_type(str): This is he entity type to be looking for in tags
after_index(int): the start token must be greaterthan this.
Returns:
( tag, v, confidence ):
tag(str): is the tag that matched
v(str): ? the word that matched?
confidence(float): is a mesure of accuacy. 1 is full confidence and 0 is none.
"""
for tag in tags:
for entity in tag.get('entities'):
for v, t in entity.get('data'):
if t.lower() == entity_type.lower() and tag.get('start_token', 0) > after_index:
return tag, v, entity.get('confidence')
return None, None, None | python | def find_first_tag(tags, entity_type, after_index=-1):
"""Searches tags for entity type after given index
Args:
tags(list): a list of tags with entity types to be compaired too entity_type
entity_type(str): This is he entity type to be looking for in tags
after_index(int): the start token must be greaterthan this.
Returns:
( tag, v, confidence ):
tag(str): is the tag that matched
v(str): ? the word that matched?
confidence(float): is a mesure of accuacy. 1 is full confidence and 0 is none.
"""
for tag in tags:
for entity in tag.get('entities'):
for v, t in entity.get('data'):
if t.lower() == entity_type.lower() and tag.get('start_token', 0) > after_index:
return tag, v, entity.get('confidence')
return None, None, None | [
"def",
"find_first_tag",
"(",
"tags",
",",
"entity_type",
",",
"after_index",
"=",
"-",
"1",
")",
":",
"for",
"tag",
"in",
"tags",
":",
"for",
"entity",
"in",
"tag",
".",
"get",
"(",
"'entities'",
")",
":",
"for",
"v",
",",
"t",
"in",
"entity",
"."... | Searches tags for entity type after given index
Args:
tags(list): a list of tags with entity types to be compaired too entity_type
entity_type(str): This is he entity type to be looking for in tags
after_index(int): the start token must be greaterthan this.
Returns:
( tag, v, confidence ):
tag(str): is the tag that matched
v(str): ? the word that matched?
confidence(float): is a mesure of accuacy. 1 is full confidence and 0 is none. | [
"Searches",
"tags",
"for",
"entity",
"type",
"after",
"given",
"index"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/intent.py#L29-L49 | train | 204,775 |
MycroftAI/adapt | adapt/intent.py | choose_1_from_each | def choose_1_from_each(lists):
"""Takes a list of lists and returns a list of lists with one item
from each list. This new list should be the length of each list multiplied
by the others. 18 for an list with lists of 3, 2 and 3. Also the lenght
of each sub list should be same as the length of lists passed in.
Args:
lists(list of Lists): A list of lists
Returns:
list of lists: returns a list of lists constructions of one item from each
list in lists.
"""
if len(lists) == 0:
yield []
else:
for el in lists[0]:
for next_list in choose_1_from_each(lists[1:]):
yield [el] + next_list | python | def choose_1_from_each(lists):
"""Takes a list of lists and returns a list of lists with one item
from each list. This new list should be the length of each list multiplied
by the others. 18 for an list with lists of 3, 2 and 3. Also the lenght
of each sub list should be same as the length of lists passed in.
Args:
lists(list of Lists): A list of lists
Returns:
list of lists: returns a list of lists constructions of one item from each
list in lists.
"""
if len(lists) == 0:
yield []
else:
for el in lists[0]:
for next_list in choose_1_from_each(lists[1:]):
yield [el] + next_list | [
"def",
"choose_1_from_each",
"(",
"lists",
")",
":",
"if",
"len",
"(",
"lists",
")",
"==",
"0",
":",
"yield",
"[",
"]",
"else",
":",
"for",
"el",
"in",
"lists",
"[",
"0",
"]",
":",
"for",
"next_list",
"in",
"choose_1_from_each",
"(",
"lists",
"[",
... | Takes a list of lists and returns a list of lists with one item
from each list. This new list should be the length of each list multiplied
by the others. 18 for an list with lists of 3, 2 and 3. Also the lenght
of each sub list should be same as the length of lists passed in.
Args:
lists(list of Lists): A list of lists
Returns:
list of lists: returns a list of lists constructions of one item from each
list in lists. | [
"Takes",
"a",
"list",
"of",
"lists",
"and",
"returns",
"a",
"list",
"of",
"lists",
"with",
"one",
"item",
"from",
"each",
"list",
".",
"This",
"new",
"list",
"should",
"be",
"the",
"length",
"of",
"each",
"list",
"multiplied",
"by",
"the",
"others",
".... | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/intent.py#L59-L77 | train | 204,776 |
MycroftAI/adapt | adapt/intent.py | resolve_one_of | def resolve_one_of(tags, at_least_one):
"""This searches tags for Entites in at_least_one and returns any match
Args:
tags(list): List of tags with Entities to search for Entities
at_least_one(list): List of Entities to find in tags
Returns:
object: returns None if no match is found but returns any match as an object
"""
if len(tags) < len(at_least_one):
return None
for possible_resolution in choose_1_from_each(at_least_one):
resolution = {}
pr = possible_resolution[:]
for entity_type in pr:
last_end_index = -1
if entity_type in resolution:
last_end_index = resolution.get[entity_type][-1].get('end_token')
tag, value, c = find_first_tag(tags, entity_type, after_index=last_end_index)
if not tag:
break
else:
if entity_type not in resolution:
resolution[entity_type] = []
resolution[entity_type].append(tag)
if len(resolution) == len(possible_resolution):
return resolution
return None | python | def resolve_one_of(tags, at_least_one):
"""This searches tags for Entites in at_least_one and returns any match
Args:
tags(list): List of tags with Entities to search for Entities
at_least_one(list): List of Entities to find in tags
Returns:
object: returns None if no match is found but returns any match as an object
"""
if len(tags) < len(at_least_one):
return None
for possible_resolution in choose_1_from_each(at_least_one):
resolution = {}
pr = possible_resolution[:]
for entity_type in pr:
last_end_index = -1
if entity_type in resolution:
last_end_index = resolution.get[entity_type][-1].get('end_token')
tag, value, c = find_first_tag(tags, entity_type, after_index=last_end_index)
if not tag:
break
else:
if entity_type not in resolution:
resolution[entity_type] = []
resolution[entity_type].append(tag)
if len(resolution) == len(possible_resolution):
return resolution
return None | [
"def",
"resolve_one_of",
"(",
"tags",
",",
"at_least_one",
")",
":",
"if",
"len",
"(",
"tags",
")",
"<",
"len",
"(",
"at_least_one",
")",
":",
"return",
"None",
"for",
"possible_resolution",
"in",
"choose_1_from_each",
"(",
"at_least_one",
")",
":",
"resolut... | This searches tags for Entites in at_least_one and returns any match
Args:
tags(list): List of tags with Entities to search for Entities
at_least_one(list): List of Entities to find in tags
Returns:
object: returns None if no match is found but returns any match as an object | [
"This",
"searches",
"tags",
"for",
"Entites",
"in",
"at_least_one",
"and",
"returns",
"any",
"match"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/intent.py#L80-L109 | train | 204,777 |
MycroftAI/adapt | adapt/intent.py | Intent.validate | def validate(self, tags, confidence):
"""Using this method removes tags from the result of validate_with_tags
Returns:
intent(intent): Resuts from validate_with_tags
"""
intent, tags = self.validate_with_tags(tags, confidence)
return intent | python | def validate(self, tags, confidence):
"""Using this method removes tags from the result of validate_with_tags
Returns:
intent(intent): Resuts from validate_with_tags
"""
intent, tags = self.validate_with_tags(tags, confidence)
return intent | [
"def",
"validate",
"(",
"self",
",",
"tags",
",",
"confidence",
")",
":",
"intent",
",",
"tags",
"=",
"self",
".",
"validate_with_tags",
"(",
"tags",
",",
"confidence",
")",
"return",
"intent"
] | Using this method removes tags from the result of validate_with_tags
Returns:
intent(intent): Resuts from validate_with_tags | [
"Using",
"this",
"method",
"removes",
"tags",
"from",
"the",
"result",
"of",
"validate_with_tags"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/intent.py#L127-L134 | train | 204,778 |
MycroftAI/adapt | adapt/intent.py | Intent.validate_with_tags | def validate_with_tags(self, tags, confidence):
"""Validate weather tags has required entites for this intent to fire
Args:
tags(list): Tags and Entities used for validation
confidence(float): ?
Returns:
intent, tags: Returns intent and tags used by the intent on
falure to meat required entities then returns intent with confidence
of 0.0 and an empty list for tags.
"""
result = {'intent_type': self.name}
intent_confidence = 0.0
local_tags = tags[:]
used_tags = []
for require_type, attribute_name in self.requires:
required_tag, canonical_form, confidence = find_first_tag(local_tags, require_type)
if not required_tag:
result['confidence'] = 0.0
return result, []
result[attribute_name] = canonical_form
if required_tag in local_tags:
local_tags.remove(required_tag)
used_tags.append(required_tag)
# TODO: use confidence based on edit distance and context
intent_confidence += confidence
if len(self.at_least_one) > 0:
best_resolution = resolve_one_of(tags, self.at_least_one)
if not best_resolution:
result['confidence'] = 0.0
return result, []
else:
for key in best_resolution:
result[key] = best_resolution[key][0].get('key') # TODO: at least one must support aliases
intent_confidence += 1.0
used_tags.append(best_resolution)
if best_resolution in local_tags:
local_tags.remove(best_resolution)
for optional_type, attribute_name in self.optional:
optional_tag, canonical_form, conf = find_first_tag(local_tags, optional_type)
if not optional_tag or attribute_name in result:
continue
result[attribute_name] = canonical_form
if optional_tag in local_tags:
local_tags.remove(optional_tag)
used_tags.append(optional_tag)
intent_confidence += 1.0
total_confidence = intent_confidence / len(tags) * confidence
target_client, canonical_form, confidence = find_first_tag(local_tags, CLIENT_ENTITY_NAME)
result['target'] = target_client.get('key') if target_client else None
result['confidence'] = total_confidence
return result, used_tags | python | def validate_with_tags(self, tags, confidence):
"""Validate weather tags has required entites for this intent to fire
Args:
tags(list): Tags and Entities used for validation
confidence(float): ?
Returns:
intent, tags: Returns intent and tags used by the intent on
falure to meat required entities then returns intent with confidence
of 0.0 and an empty list for tags.
"""
result = {'intent_type': self.name}
intent_confidence = 0.0
local_tags = tags[:]
used_tags = []
for require_type, attribute_name in self.requires:
required_tag, canonical_form, confidence = find_first_tag(local_tags, require_type)
if not required_tag:
result['confidence'] = 0.0
return result, []
result[attribute_name] = canonical_form
if required_tag in local_tags:
local_tags.remove(required_tag)
used_tags.append(required_tag)
# TODO: use confidence based on edit distance and context
intent_confidence += confidence
if len(self.at_least_one) > 0:
best_resolution = resolve_one_of(tags, self.at_least_one)
if not best_resolution:
result['confidence'] = 0.0
return result, []
else:
for key in best_resolution:
result[key] = best_resolution[key][0].get('key') # TODO: at least one must support aliases
intent_confidence += 1.0
used_tags.append(best_resolution)
if best_resolution in local_tags:
local_tags.remove(best_resolution)
for optional_type, attribute_name in self.optional:
optional_tag, canonical_form, conf = find_first_tag(local_tags, optional_type)
if not optional_tag or attribute_name in result:
continue
result[attribute_name] = canonical_form
if optional_tag in local_tags:
local_tags.remove(optional_tag)
used_tags.append(optional_tag)
intent_confidence += 1.0
total_confidence = intent_confidence / len(tags) * confidence
target_client, canonical_form, confidence = find_first_tag(local_tags, CLIENT_ENTITY_NAME)
result['target'] = target_client.get('key') if target_client else None
result['confidence'] = total_confidence
return result, used_tags | [
"def",
"validate_with_tags",
"(",
"self",
",",
"tags",
",",
"confidence",
")",
":",
"result",
"=",
"{",
"'intent_type'",
":",
"self",
".",
"name",
"}",
"intent_confidence",
"=",
"0.0",
"local_tags",
"=",
"tags",
"[",
":",
"]",
"used_tags",
"=",
"[",
"]",... | Validate weather tags has required entites for this intent to fire
Args:
tags(list): Tags and Entities used for validation
confidence(float): ?
Returns:
intent, tags: Returns intent and tags used by the intent on
falure to meat required entities then returns intent with confidence
of 0.0 and an empty list for tags. | [
"Validate",
"weather",
"tags",
"has",
"required",
"entites",
"for",
"this",
"intent",
"to",
"fire"
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/intent.py#L136-L196 | train | 204,779 |
MycroftAI/adapt | adapt/intent.py | IntentBuilder.require | def require(self, entity_type, attribute_name=None):
"""
The intent parser should require an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications.
"""
if not attribute_name:
attribute_name = entity_type
self.requires += [(entity_type, attribute_name)]
return self | python | def require(self, entity_type, attribute_name=None):
"""
The intent parser should require an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications.
"""
if not attribute_name:
attribute_name = entity_type
self.requires += [(entity_type, attribute_name)]
return self | [
"def",
"require",
"(",
"self",
",",
"entity_type",
",",
"attribute_name",
"=",
"None",
")",
":",
"if",
"not",
"attribute_name",
":",
"attribute_name",
"=",
"entity_type",
"self",
".",
"requires",
"+=",
"[",
"(",
"entity_type",
",",
"attribute_name",
")",
"]"... | The intent parser should require an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications. | [
"The",
"intent",
"parser",
"should",
"require",
"an",
"entity",
"of",
"the",
"provided",
"type",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/intent.py#L242-L256 | train | 204,780 |
MycroftAI/adapt | adapt/intent.py | IntentBuilder.optionally | def optionally(self, entity_type, attribute_name=None):
"""
Parsed intents from this parser can optionally include an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications.
"""
if not attribute_name:
attribute_name = entity_type
self.optional += [(entity_type, attribute_name)]
return self | python | def optionally(self, entity_type, attribute_name=None):
"""
Parsed intents from this parser can optionally include an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications.
"""
if not attribute_name:
attribute_name = entity_type
self.optional += [(entity_type, attribute_name)]
return self | [
"def",
"optionally",
"(",
"self",
",",
"entity_type",
",",
"attribute_name",
"=",
"None",
")",
":",
"if",
"not",
"attribute_name",
":",
"attribute_name",
"=",
"entity_type",
"self",
".",
"optional",
"+=",
"[",
"(",
"entity_type",
",",
"attribute_name",
")",
... | Parsed intents from this parser can optionally include an entity of the provided type.
Args:
entity_type(str): an entity type
attribute_name(str): the name of the attribute on the parsed intent. Defaults to match entity_type.
Returns:
self: to continue modifications. | [
"Parsed",
"intents",
"from",
"this",
"parser",
"can",
"optionally",
"include",
"an",
"entity",
"of",
"the",
"provided",
"type",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/intent.py#L258-L272 | train | 204,781 |
MycroftAI/adapt | adapt/intent.py | IntentBuilder.build | def build(self):
"""
Constructs an intent from the builder's specifications.
:return: an Intent instance.
"""
return Intent(self.name, self.requires, self.at_least_one, self.optional) | python | def build(self):
"""
Constructs an intent from the builder's specifications.
:return: an Intent instance.
"""
return Intent(self.name, self.requires, self.at_least_one, self.optional) | [
"def",
"build",
"(",
"self",
")",
":",
"return",
"Intent",
"(",
"self",
".",
"name",
",",
"self",
".",
"requires",
",",
"self",
".",
"at_least_one",
",",
"self",
".",
"optional",
")"
] | Constructs an intent from the builder's specifications.
:return: an Intent instance. | [
"Constructs",
"an",
"intent",
"from",
"the",
"builder",
"s",
"specifications",
"."
] | 334f23248b8e09fb9d84a88398424ec5bd3bae4c | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/intent.py#L274-L280 | train | 204,782 |
biolink/ontobio | ontobio/golr/golr_matrix.py | term_matrix | def term_matrix(idlist, subject_category, taxon, **kwargs):
"""
Intersection between annotated objects
P1 not(P1)
F1 0 5
not(F1) 6 0
"""
results = search_associations(objects=idlist,
subject_taxon=taxon,
subject_category=subject_category,
select_fields=[M.SUBJECT, M.OBJECT_CLOSURE],
facet_fields=[],
rows=-1,
include_raw=True,
**kwargs)
docs = results['raw'].docs
subjects_per_term = {}
smap = {}
for d in docs:
smap[d[M.SUBJECT]] = 1
for c in d[M.OBJECT_CLOSURE]:
if c in idlist:
if c not in subjects_per_term:
subjects_per_term[c] = []
subjects_per_term[c].append(d[M.SUBJECT])
pop_n = len(smap.keys())
cells = []
for cx in idlist:
csubjs = set(subjects_per_term[cx])
for dx in idlist:
dsubjs = set(subjects_per_term[dx])
a = len(csubjs.intersection(dsubjs))
b = len(csubjs) - a
c = len(dsubjs) - a
d = pop_n - len(dsubjs) - b
ctable = [[a, b], [c, d]]
_, p_under = sp.stats.fisher_exact(ctable, 'less')
_, p_over = sp.stats.fisher_exact(ctable, 'greater')
cells.append({'c':cx, 'd':dx,
'nc':len(csubjs),
'nd':len(dsubjs),
'n':a,
'p_l':p_under,
'p_g':p_over
})
return cells | python | def term_matrix(idlist, subject_category, taxon, **kwargs):
"""
Intersection between annotated objects
P1 not(P1)
F1 0 5
not(F1) 6 0
"""
results = search_associations(objects=idlist,
subject_taxon=taxon,
subject_category=subject_category,
select_fields=[M.SUBJECT, M.OBJECT_CLOSURE],
facet_fields=[],
rows=-1,
include_raw=True,
**kwargs)
docs = results['raw'].docs
subjects_per_term = {}
smap = {}
for d in docs:
smap[d[M.SUBJECT]] = 1
for c in d[M.OBJECT_CLOSURE]:
if c in idlist:
if c not in subjects_per_term:
subjects_per_term[c] = []
subjects_per_term[c].append(d[M.SUBJECT])
pop_n = len(smap.keys())
cells = []
for cx in idlist:
csubjs = set(subjects_per_term[cx])
for dx in idlist:
dsubjs = set(subjects_per_term[dx])
a = len(csubjs.intersection(dsubjs))
b = len(csubjs) - a
c = len(dsubjs) - a
d = pop_n - len(dsubjs) - b
ctable = [[a, b], [c, d]]
_, p_under = sp.stats.fisher_exact(ctable, 'less')
_, p_over = sp.stats.fisher_exact(ctable, 'greater')
cells.append({'c':cx, 'd':dx,
'nc':len(csubjs),
'nd':len(dsubjs),
'n':a,
'p_l':p_under,
'p_g':p_over
})
return cells | [
"def",
"term_matrix",
"(",
"idlist",
",",
"subject_category",
",",
"taxon",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"search_associations",
"(",
"objects",
"=",
"idlist",
",",
"subject_taxon",
"=",
"taxon",
",",
"subject_category",
"=",
"subject_cate... | Intersection between annotated objects
P1 not(P1)
F1 0 5
not(F1) 6 0 | [
"Intersection",
"between",
"annotated",
"objects"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_matrix.py#L17-L67 | train | 204,783 |
biolink/ontobio | ontobio/util/go_utils.py | GoAspector.get_ancestors_through_subont | def get_ancestors_through_subont(self, go_term, relations):
"""
Returns the ancestors from the relation filtered GO subontology of go_term's ancestors.
subontology() primarily used here for speed when specifying relations to traverse. Point of this is to first get
a smaller graph (all ancestors of go_term regardless of relation) and then filter relations on that instead of
the whole GO.
"""
all_ancestors = self.ontology.ancestors(go_term, reflexive=True)
subont = self.ontology.subontology(all_ancestors)
return subont.ancestors(go_term, relations) | python | def get_ancestors_through_subont(self, go_term, relations):
"""
Returns the ancestors from the relation filtered GO subontology of go_term's ancestors.
subontology() primarily used here for speed when specifying relations to traverse. Point of this is to first get
a smaller graph (all ancestors of go_term regardless of relation) and then filter relations on that instead of
the whole GO.
"""
all_ancestors = self.ontology.ancestors(go_term, reflexive=True)
subont = self.ontology.subontology(all_ancestors)
return subont.ancestors(go_term, relations) | [
"def",
"get_ancestors_through_subont",
"(",
"self",
",",
"go_term",
",",
"relations",
")",
":",
"all_ancestors",
"=",
"self",
".",
"ontology",
".",
"ancestors",
"(",
"go_term",
",",
"reflexive",
"=",
"True",
")",
"subont",
"=",
"self",
".",
"ontology",
".",
... | Returns the ancestors from the relation filtered GO subontology of go_term's ancestors.
subontology() primarily used here for speed when specifying relations to traverse. Point of this is to first get
a smaller graph (all ancestors of go_term regardless of relation) and then filter relations on that instead of
the whole GO. | [
"Returns",
"the",
"ancestors",
"from",
"the",
"relation",
"filtered",
"GO",
"subontology",
"of",
"go_term",
"s",
"ancestors",
"."
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/util/go_utils.py#L11-L21 | train | 204,784 |
biolink/ontobio | ontobio/util/go_utils.py | GoAspector.go_aspect | def go_aspect(self, go_term):
"""
For GO terms, returns F, C, or P corresponding to its aspect
"""
if not go_term.startswith("GO:"):
return None
else:
# Check ancestors for root terms
if self.is_molecular_function(go_term):
return 'F'
elif self.is_cellular_component(go_term):
return 'C'
elif self.is_biological_process(go_term):
return 'P' | python | def go_aspect(self, go_term):
"""
For GO terms, returns F, C, or P corresponding to its aspect
"""
if not go_term.startswith("GO:"):
return None
else:
# Check ancestors for root terms
if self.is_molecular_function(go_term):
return 'F'
elif self.is_cellular_component(go_term):
return 'C'
elif self.is_biological_process(go_term):
return 'P' | [
"def",
"go_aspect",
"(",
"self",
",",
"go_term",
")",
":",
"if",
"not",
"go_term",
".",
"startswith",
"(",
"\"GO:\"",
")",
":",
"return",
"None",
"else",
":",
"# Check ancestors for root terms",
"if",
"self",
".",
"is_molecular_function",
"(",
"go_term",
")",
... | For GO terms, returns F, C, or P corresponding to its aspect | [
"For",
"GO",
"terms",
"returns",
"F",
"C",
"or",
"P",
"corresponding",
"to",
"its",
"aspect"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/util/go_utils.py#L68-L81 | train | 204,785 |
biolink/ontobio | ontobio/neo/scigraph_ontology.py | RemoteScigraphOntology._neighbors_graph | def _neighbors_graph(self, **params) -> Dict:
"""
Get neighbors of a node
parameters are directly passed through to SciGraph: e.g. depth, relationshipType
"""
response = self._get_response("graph/neighbors", format="json", **params)
return response.json() | python | def _neighbors_graph(self, **params) -> Dict:
"""
Get neighbors of a node
parameters are directly passed through to SciGraph: e.g. depth, relationshipType
"""
response = self._get_response("graph/neighbors", format="json", **params)
return response.json() | [
"def",
"_neighbors_graph",
"(",
"self",
",",
"*",
"*",
"params",
")",
"->",
"Dict",
":",
"response",
"=",
"self",
".",
"_get_response",
"(",
"\"graph/neighbors\"",
",",
"format",
"=",
"\"json\"",
",",
"*",
"*",
"params",
")",
"return",
"response",
".",
"... | Get neighbors of a node
parameters are directly passed through to SciGraph: e.g. depth, relationshipType | [
"Get",
"neighbors",
"of",
"a",
"node"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/neo/scigraph_ontology.py#L89-L96 | train | 204,786 |
biolink/ontobio | ontobio/sparql/rdflib_bridge.py | rdfgraph_to_ontol | def rdfgraph_to_ontol(rg):
"""
Return an Ontology object from an rdflib graph object
Status: Incomplete
"""
digraph = networkx.MultiDiGraph()
from rdflib.namespace import RDF
label_map = {}
for c in rg.subjects(RDF.type, OWL.Class):
cid = contract_uri_wrap(c)
logging.info("C={}".format(cid))
for lit in rg.objects(c, RDFS.label):
label_map[cid] = lit.value
digraph.add_node(cid, label=lit.value)
for s in rg.objects(c, RDFS.subClassOf):
# todo - blank nodes
sid = contract_uri_wrap(s)
digraph.add_edge(sid, cid, pred='subClassOf')
logging.info("G={}".format(digraph))
payload = {
'graph': digraph,
#'xref_graph': xref_graph,
#'graphdoc': obographdoc,
#'logical_definitions': logical_definitions
}
ont = Ontology(handle='wd', payload=payload)
return ont | python | def rdfgraph_to_ontol(rg):
"""
Return an Ontology object from an rdflib graph object
Status: Incomplete
"""
digraph = networkx.MultiDiGraph()
from rdflib.namespace import RDF
label_map = {}
for c in rg.subjects(RDF.type, OWL.Class):
cid = contract_uri_wrap(c)
logging.info("C={}".format(cid))
for lit in rg.objects(c, RDFS.label):
label_map[cid] = lit.value
digraph.add_node(cid, label=lit.value)
for s in rg.objects(c, RDFS.subClassOf):
# todo - blank nodes
sid = contract_uri_wrap(s)
digraph.add_edge(sid, cid, pred='subClassOf')
logging.info("G={}".format(digraph))
payload = {
'graph': digraph,
#'xref_graph': xref_graph,
#'graphdoc': obographdoc,
#'logical_definitions': logical_definitions
}
ont = Ontology(handle='wd', payload=payload)
return ont | [
"def",
"rdfgraph_to_ontol",
"(",
"rg",
")",
":",
"digraph",
"=",
"networkx",
".",
"MultiDiGraph",
"(",
")",
"from",
"rdflib",
".",
"namespace",
"import",
"RDF",
"label_map",
"=",
"{",
"}",
"for",
"c",
"in",
"rg",
".",
"subjects",
"(",
"RDF",
".",
"type... | Return an Ontology object from an rdflib graph object
Status: Incomplete | [
"Return",
"an",
"Ontology",
"object",
"from",
"an",
"rdflib",
"graph",
"object"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/rdflib_bridge.py#L15-L44 | train | 204,787 |
biolink/ontobio | ontobio/golr/golr_associations.py | get_association | def get_association(id, **kwargs):
"""
Fetch an association object by ID
"""
results = search_associations(id=id, **kwargs)
assoc = results['associations'][0] if len(results['associations']) > 0 else {}
return assoc | python | def get_association(id, **kwargs):
"""
Fetch an association object by ID
"""
results = search_associations(id=id, **kwargs)
assoc = results['associations'][0] if len(results['associations']) > 0 else {}
return assoc | [
"def",
"get_association",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"search_associations",
"(",
"id",
"=",
"id",
",",
"*",
"*",
"kwargs",
")",
"assoc",
"=",
"results",
"[",
"'associations'",
"]",
"[",
"0",
"]",
"if",
"len",
"(",
... | Fetch an association object by ID | [
"Fetch",
"an",
"association",
"object",
"by",
"ID"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_associations.py#L20-L26 | train | 204,788 |
biolink/ontobio | ontobio/golr/golr_associations.py | search_associations | def search_associations(**kwargs):
"""
Fetch a set of association objects based on a query.
"""
logging.info("CREATING_GOLR_QUERY {}".format(kwargs))
q = GolrAssociationQuery(**kwargs)
return q.exec() | python | def search_associations(**kwargs):
"""
Fetch a set of association objects based on a query.
"""
logging.info("CREATING_GOLR_QUERY {}".format(kwargs))
q = GolrAssociationQuery(**kwargs)
return q.exec() | [
"def",
"search_associations",
"(",
"*",
"*",
"kwargs",
")",
":",
"logging",
".",
"info",
"(",
"\"CREATING_GOLR_QUERY {}\"",
".",
"format",
"(",
"kwargs",
")",
")",
"q",
"=",
"GolrAssociationQuery",
"(",
"*",
"*",
"kwargs",
")",
"return",
"q",
".",
"exec",
... | Fetch a set of association objects based on a query. | [
"Fetch",
"a",
"set",
"of",
"association",
"objects",
"based",
"on",
"a",
"query",
"."
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_associations.py#L28-L35 | train | 204,789 |
biolink/ontobio | ontobio/golr/golr_associations.py | bulk_fetch | def bulk_fetch(subject_category, object_category, taxon, rows=MAX_ROWS, **kwargs):
"""
Fetch associations for a species and pair of categories in bulk.
Arguments:
- subject_category: String (not None)
- object_category: String (not None)
- taxon: String
- rows: int
Additionally, any argument for search_associations can be passed
"""
assert subject_category is not None
assert object_category is not None
time.sleep(1)
logging.info("Bulk query: {} {} {}".format(subject_category, object_category, taxon))
assocs = search_associations_compact(subject_category=subject_category,
object_category=object_category,
subject_taxon=taxon,
rows=rows,
iterate=True,
**kwargs)
logging.info("Rows retrieved: {}".format(len(assocs)))
if len(assocs) == 0:
logging.error("No associations returned for query: {} {} {}".format(subject_category, object_category, taxon))
return assocs | python | def bulk_fetch(subject_category, object_category, taxon, rows=MAX_ROWS, **kwargs):
"""
Fetch associations for a species and pair of categories in bulk.
Arguments:
- subject_category: String (not None)
- object_category: String (not None)
- taxon: String
- rows: int
Additionally, any argument for search_associations can be passed
"""
assert subject_category is not None
assert object_category is not None
time.sleep(1)
logging.info("Bulk query: {} {} {}".format(subject_category, object_category, taxon))
assocs = search_associations_compact(subject_category=subject_category,
object_category=object_category,
subject_taxon=taxon,
rows=rows,
iterate=True,
**kwargs)
logging.info("Rows retrieved: {}".format(len(assocs)))
if len(assocs) == 0:
logging.error("No associations returned for query: {} {} {}".format(subject_category, object_category, taxon))
return assocs | [
"def",
"bulk_fetch",
"(",
"subject_category",
",",
"object_category",
",",
"taxon",
",",
"rows",
"=",
"MAX_ROWS",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"subject_category",
"is",
"not",
"None",
"assert",
"object_category",
"is",
"not",
"None",
"time",
... | Fetch associations for a species and pair of categories in bulk.
Arguments:
- subject_category: String (not None)
- object_category: String (not None)
- taxon: String
- rows: int
Additionally, any argument for search_associations can be passed | [
"Fetch",
"associations",
"for",
"a",
"species",
"and",
"pair",
"of",
"categories",
"in",
"bulk",
"."
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_associations.py#L126-L152 | train | 204,790 |
biolink/ontobio | ontobio/golr/golr_associations.py | search_associations_go | def search_associations_go(
subject_category=None,
object_category=None,
relation=None,
subject=None,
**kwargs):
"""
Perform association search using Monarch golr
"""
go_golr_url = "http://golr.geneontology.org/solr/"
go_solr = pysolr.Solr(go_golr_url, timeout=5)
go_solr.get_session().headers['User-Agent'] = get_user_agent(caller_name=__name__)
return search_associations(subject_category,
object_category,
relation,
subject,
solr=go_solr,
field_mapping=goassoc_fieldmap(),
**kwargs) | python | def search_associations_go(
subject_category=None,
object_category=None,
relation=None,
subject=None,
**kwargs):
"""
Perform association search using Monarch golr
"""
go_golr_url = "http://golr.geneontology.org/solr/"
go_solr = pysolr.Solr(go_golr_url, timeout=5)
go_solr.get_session().headers['User-Agent'] = get_user_agent(caller_name=__name__)
return search_associations(subject_category,
object_category,
relation,
subject,
solr=go_solr,
field_mapping=goassoc_fieldmap(),
**kwargs) | [
"def",
"search_associations_go",
"(",
"subject_category",
"=",
"None",
",",
"object_category",
"=",
"None",
",",
"relation",
"=",
"None",
",",
"subject",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"go_golr_url",
"=",
"\"http://golr.geneontology.org/solr/\"",
... | Perform association search using Monarch golr | [
"Perform",
"association",
"search",
"using",
"Monarch",
"golr"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_associations.py#L222-L240 | train | 204,791 |
biolink/ontobio | ontobio/golr/golr_associations.py | select_distinct | def select_distinct(distinct_field=None, **kwargs):
"""
select distinct values for a given field for a given a query
"""
results = search_associations(rows=0,
select_fields=[],
facet_field_limits = {
distinct_field : -1
},
facet_fields=[distinct_field],
**kwargs
)
# TODO: map field
return list(results['facet_counts'][distinct_field].keys()) | python | def select_distinct(distinct_field=None, **kwargs):
"""
select distinct values for a given field for a given a query
"""
results = search_associations(rows=0,
select_fields=[],
facet_field_limits = {
distinct_field : -1
},
facet_fields=[distinct_field],
**kwargs
)
# TODO: map field
return list(results['facet_counts'][distinct_field].keys()) | [
"def",
"select_distinct",
"(",
"distinct_field",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"search_associations",
"(",
"rows",
"=",
"0",
",",
"select_fields",
"=",
"[",
"]",
",",
"facet_field_limits",
"=",
"{",
"distinct_field",
":",
... | select distinct values for a given field for a given a query | [
"select",
"distinct",
"values",
"for",
"a",
"given",
"field",
"for",
"a",
"given",
"a",
"query"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/golr/golr_associations.py#L242-L255 | train | 204,792 |
biolink/ontobio | ontobio/sim/api/semsearch.py | SemSearchEngine.pw_score_cosine | def pw_score_cosine(self, s1 : ClassId, s2 : ClassId) -> SimScore:
"""
Cosine similarity of two subjects
Arguments
---------
s1 : str
class id
Return
------
number
A number between 0 and 1
"""
df = self.assoc_df
slice1 = df.loc[s1].values
slice2 = df.loc[s2].values
return 1 - cosine(slice1, slice2) | python | def pw_score_cosine(self, s1 : ClassId, s2 : ClassId) -> SimScore:
"""
Cosine similarity of two subjects
Arguments
---------
s1 : str
class id
Return
------
number
A number between 0 and 1
"""
df = self.assoc_df
slice1 = df.loc[s1].values
slice2 = df.loc[s2].values
return 1 - cosine(slice1, slice2) | [
"def",
"pw_score_cosine",
"(",
"self",
",",
"s1",
":",
"ClassId",
",",
"s2",
":",
"ClassId",
")",
"->",
"SimScore",
":",
"df",
"=",
"self",
".",
"assoc_df",
"slice1",
"=",
"df",
".",
"loc",
"[",
"s1",
"]",
".",
"values",
"slice2",
"=",
"df",
".",
... | Cosine similarity of two subjects
Arguments
---------
s1 : str
class id
Return
------
number
A number between 0 and 1 | [
"Cosine",
"similarity",
"of",
"two",
"subjects"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/semsearch.py#L60-L78 | train | 204,793 |
biolink/ontobio | ontobio/sim/api/semsearch.py | SemSearchEngine.calculate_mrcas | def calculate_mrcas(self, c1 : ClassId, c2 : ClassId) -> Set[ClassId]:
"""
Calculate the MRCA for a class pair
"""
G = self.G
# reflexive ancestors
ancs1 = self._ancestors(c1) | {c1}
ancs2 = self._ancestors(c2) | {c2}
common_ancestors = ancs1 & ancs2
redundant = set()
for a in common_ancestors:
redundant = redundant | nx.ancestors(G, a)
return common_ancestors - redundant | python | def calculate_mrcas(self, c1 : ClassId, c2 : ClassId) -> Set[ClassId]:
"""
Calculate the MRCA for a class pair
"""
G = self.G
# reflexive ancestors
ancs1 = self._ancestors(c1) | {c1}
ancs2 = self._ancestors(c2) | {c2}
common_ancestors = ancs1 & ancs2
redundant = set()
for a in common_ancestors:
redundant = redundant | nx.ancestors(G, a)
return common_ancestors - redundant | [
"def",
"calculate_mrcas",
"(",
"self",
",",
"c1",
":",
"ClassId",
",",
"c2",
":",
"ClassId",
")",
"->",
"Set",
"[",
"ClassId",
"]",
":",
"G",
"=",
"self",
".",
"G",
"# reflexive ancestors",
"ancs1",
"=",
"self",
".",
"_ancestors",
"(",
"c1",
")",
"|"... | Calculate the MRCA for a class pair | [
"Calculate",
"the",
"MRCA",
"for",
"a",
"class",
"pair"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/semsearch.py#L108-L120 | train | 204,794 |
biolink/ontobio | ontobio/sim/api/semsearch.py | SemSearchEngine.pw_compare_class_sets | def pw_compare_class_sets(self, cset1: Set[ClassId], cset2: Set[ClassId]) -> Tuple[ICValue, ICValue, ICValue]:
"""
Compare two class profiles
"""
pairs = self.mica_ic_df.loc[cset1, cset2]
max0 = pairs.max(axis=0)
max1 = pairs.max(axis=1)
idxmax0 = pairs.idxmax(axis=0)
idxmax1 = pairs.idxmax(axis=1)
mean0 = max0.mean()
mean1 = max1.mean()
return (mean0+mean1)/2, mean0, mean1 | python | def pw_compare_class_sets(self, cset1: Set[ClassId], cset2: Set[ClassId]) -> Tuple[ICValue, ICValue, ICValue]:
"""
Compare two class profiles
"""
pairs = self.mica_ic_df.loc[cset1, cset2]
max0 = pairs.max(axis=0)
max1 = pairs.max(axis=1)
idxmax0 = pairs.idxmax(axis=0)
idxmax1 = pairs.idxmax(axis=1)
mean0 = max0.mean()
mean1 = max1.mean()
return (mean0+mean1)/2, mean0, mean1 | [
"def",
"pw_compare_class_sets",
"(",
"self",
",",
"cset1",
":",
"Set",
"[",
"ClassId",
"]",
",",
"cset2",
":",
"Set",
"[",
"ClassId",
"]",
")",
"->",
"Tuple",
"[",
"ICValue",
",",
"ICValue",
",",
"ICValue",
"]",
":",
"pairs",
"=",
"self",
".",
"mica_... | Compare two class profiles | [
"Compare",
"two",
"class",
"profiles"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/semsearch.py#L177-L188 | train | 204,795 |
biolink/ontobio | ontobio/sparql/skos.py | Skos.process_file | def process_file(self,filename=None, format=None):
"""
Parse a file into an ontology object, using rdflib
"""
rdfgraph = rdflib.Graph()
if format is None:
if filename.endswith(".ttl"):
format='turtle'
elif filename.endswith(".rdf"):
format='xml'
rdfgraph.parse(filename, format=format)
return self.process_rdfgraph(rdfgraph) | python | def process_file(self,filename=None, format=None):
"""
Parse a file into an ontology object, using rdflib
"""
rdfgraph = rdflib.Graph()
if format is None:
if filename.endswith(".ttl"):
format='turtle'
elif filename.endswith(".rdf"):
format='xml'
rdfgraph.parse(filename, format=format)
return self.process_rdfgraph(rdfgraph) | [
"def",
"process_file",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"format",
"=",
"None",
")",
":",
"rdfgraph",
"=",
"rdflib",
".",
"Graph",
"(",
")",
"if",
"format",
"is",
"None",
":",
"if",
"filename",
".",
"endswith",
"(",
"\".ttl\"",
")",
":"... | Parse a file into an ontology object, using rdflib | [
"Parse",
"a",
"file",
"into",
"an",
"ontology",
"object",
"using",
"rdflib"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/skos.py#L38-L49 | train | 204,796 |
biolink/ontobio | ontobio/sparql/skos.py | Skos.process_rdfgraph | def process_rdfgraph(self, rg, ont=None):
"""
Transform a skos terminology expressed in an rdf graph into an Ontology object
Arguments
---------
rg: rdflib.Graph
graph object
Returns
-------
Ontology
"""
# TODO: ontology metadata
if ont is None:
ont = Ontology()
subjs = list(rg.subjects(RDF.type, SKOS.ConceptScheme))
if len(subjs) == 0:
logging.warning("No ConceptScheme")
else:
ont.id = self._uri2id(subjs[0])
subset_map = {}
for concept in rg.subjects(RDF.type, SKOS.Concept):
for s in self._get_schemes(rg, concept):
subset_map[self._uri2id(s)] = s
for concept in sorted(list(rg.subjects(RDF.type, SKOS.Concept))):
concept_uri = str(concept)
id=self._uri2id(concept)
logging.info("ADDING: {}".format(id))
ont.add_node(id, self._get_label(rg,concept))
for defn in rg.objects(concept, SKOS.definition):
if (defn.language == self.lang):
td = TextDefinition(id, escape_value(defn.value))
ont.add_text_definition(td)
for s in rg.objects(concept, SKOS.broader):
ont.add_parent(id, self._uri2id(s))
for s in rg.objects(concept, SKOS.related):
ont.add_parent(id, self._uri2id(s), self._uri2id(SKOS.related))
for m in rg.objects(concept, SKOS.exactMatch):
ont.add_xref(id, self._uri2id(m))
for m in rg.objects(concept, SKOS.altLabel):
syn = Synonym(id, val=self._uri2id(m))
ont.add_synonym(syn)
for s in self._get_schemes(rg,concept):
ont.add_to_subset(id, self._uri2id(s))
return ont | python | def process_rdfgraph(self, rg, ont=None):
"""
Transform a skos terminology expressed in an rdf graph into an Ontology object
Arguments
---------
rg: rdflib.Graph
graph object
Returns
-------
Ontology
"""
# TODO: ontology metadata
if ont is None:
ont = Ontology()
subjs = list(rg.subjects(RDF.type, SKOS.ConceptScheme))
if len(subjs) == 0:
logging.warning("No ConceptScheme")
else:
ont.id = self._uri2id(subjs[0])
subset_map = {}
for concept in rg.subjects(RDF.type, SKOS.Concept):
for s in self._get_schemes(rg, concept):
subset_map[self._uri2id(s)] = s
for concept in sorted(list(rg.subjects(RDF.type, SKOS.Concept))):
concept_uri = str(concept)
id=self._uri2id(concept)
logging.info("ADDING: {}".format(id))
ont.add_node(id, self._get_label(rg,concept))
for defn in rg.objects(concept, SKOS.definition):
if (defn.language == self.lang):
td = TextDefinition(id, escape_value(defn.value))
ont.add_text_definition(td)
for s in rg.objects(concept, SKOS.broader):
ont.add_parent(id, self._uri2id(s))
for s in rg.objects(concept, SKOS.related):
ont.add_parent(id, self._uri2id(s), self._uri2id(SKOS.related))
for m in rg.objects(concept, SKOS.exactMatch):
ont.add_xref(id, self._uri2id(m))
for m in rg.objects(concept, SKOS.altLabel):
syn = Synonym(id, val=self._uri2id(m))
ont.add_synonym(syn)
for s in self._get_schemes(rg,concept):
ont.add_to_subset(id, self._uri2id(s))
return ont | [
"def",
"process_rdfgraph",
"(",
"self",
",",
"rg",
",",
"ont",
"=",
"None",
")",
":",
"# TODO: ontology metadata",
"if",
"ont",
"is",
"None",
":",
"ont",
"=",
"Ontology",
"(",
")",
"subjs",
"=",
"list",
"(",
"rg",
".",
"subjects",
"(",
"RDF",
".",
"t... | Transform a skos terminology expressed in an rdf graph into an Ontology object
Arguments
---------
rg: rdflib.Graph
graph object
Returns
-------
Ontology | [
"Transform",
"a",
"skos",
"terminology",
"expressed",
"in",
"an",
"rdf",
"graph",
"into",
"an",
"Ontology",
"object"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/skos.py#L51-L105 | train | 204,797 |
biolink/ontobio | ontobio/sim/api/owlsim2.py | get_attribute_information_profile | def get_attribute_information_profile(
url: str,
profile: Optional[Tuple[str]]=None,
categories: Optional[Tuple[str]]=None) -> Dict:
"""
Get the information content for a list of phenotypes
and the annotation sufficiency simple and
and categorical scores if categories are provied
Ref: https://zenodo.org/record/834091#.W8ZnCxhlCV4
Note that the simple score varies slightly from the pub in that
it uses max_max_ic instead of mean_max_ic
If no arguments are passed this function returns the
system (loaded cohort) stats
:raises JSONDecodeError: If the response body does not contain valid json.
"""
owlsim_url = url + 'getAttributeInformationProfile'
params = {
'a': profile,
'r': categories
}
return requests.get(owlsim_url, params=params, timeout=TIMEOUT).json() | python | def get_attribute_information_profile(
url: str,
profile: Optional[Tuple[str]]=None,
categories: Optional[Tuple[str]]=None) -> Dict:
"""
Get the information content for a list of phenotypes
and the annotation sufficiency simple and
and categorical scores if categories are provied
Ref: https://zenodo.org/record/834091#.W8ZnCxhlCV4
Note that the simple score varies slightly from the pub in that
it uses max_max_ic instead of mean_max_ic
If no arguments are passed this function returns the
system (loaded cohort) stats
:raises JSONDecodeError: If the response body does not contain valid json.
"""
owlsim_url = url + 'getAttributeInformationProfile'
params = {
'a': profile,
'r': categories
}
return requests.get(owlsim_url, params=params, timeout=TIMEOUT).json() | [
"def",
"get_attribute_information_profile",
"(",
"url",
":",
"str",
",",
"profile",
":",
"Optional",
"[",
"Tuple",
"[",
"str",
"]",
"]",
"=",
"None",
",",
"categories",
":",
"Optional",
"[",
"Tuple",
"[",
"str",
"]",
"]",
"=",
"None",
")",
"->",
"Dict"... | Get the information content for a list of phenotypes
and the annotation sufficiency simple and
and categorical scores if categories are provied
Ref: https://zenodo.org/record/834091#.W8ZnCxhlCV4
Note that the simple score varies slightly from the pub in that
it uses max_max_ic instead of mean_max_ic
If no arguments are passed this function returns the
system (loaded cohort) stats
:raises JSONDecodeError: If the response body does not contain valid json. | [
"Get",
"the",
"information",
"content",
"for",
"a",
"list",
"of",
"phenotypes",
"and",
"the",
"annotation",
"sufficiency",
"simple",
"and",
"and",
"categorical",
"scores",
"if",
"categories",
"are",
"provied"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim2.py#L77-L100 | train | 204,798 |
biolink/ontobio | ontobio/sim/api/owlsim2.py | OwlSim2Api.search | def search(
self,
id_list: List,
negated_classes: List,
limit: Optional[int] = 100,
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult:
"""
Owlsim2 search, calls search_by_attribute_set, and converts to SimResult object
:raises JSONDecodeError: If the owlsim response is not valid json.
"""
return self.filtered_search(
id_list=id_list,
negated_classes=negated_classes,
limit=limit,
taxon_filter=None,
category_filter=None,
method=method
) | python | def search(
self,
id_list: List,
negated_classes: List,
limit: Optional[int] = 100,
method: Optional[SimAlgorithm] = SimAlgorithm.PHENODIGM) -> SimResult:
"""
Owlsim2 search, calls search_by_attribute_set, and converts to SimResult object
:raises JSONDecodeError: If the owlsim response is not valid json.
"""
return self.filtered_search(
id_list=id_list,
negated_classes=negated_classes,
limit=limit,
taxon_filter=None,
category_filter=None,
method=method
) | [
"def",
"search",
"(",
"self",
",",
"id_list",
":",
"List",
",",
"negated_classes",
":",
"List",
",",
"limit",
":",
"Optional",
"[",
"int",
"]",
"=",
"100",
",",
"method",
":",
"Optional",
"[",
"SimAlgorithm",
"]",
"=",
"SimAlgorithm",
".",
"PHENODIGM",
... | Owlsim2 search, calls search_by_attribute_set, and converts to SimResult object
:raises JSONDecodeError: If the owlsim response is not valid json. | [
"Owlsim2",
"search",
"calls",
"search_by_attribute_set",
"and",
"converts",
"to",
"SimResult",
"object"
] | 4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345 | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sim/api/owlsim2.py#L228-L247 | train | 204,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.