repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffSwitch.on | def on(self):
"""Send ON command to device."""
on_command = StandardSend(self._address,
COMMAND_LIGHT_ON_0X11_NONE, 0xff)
self._send_method(on_command,
self._on_message_received) | python | def on(self):
"""Send ON command to device."""
on_command = StandardSend(self._address,
COMMAND_LIGHT_ON_0X11_NONE, 0xff)
self._send_method(on_command,
self._on_message_received) | [
"def",
"on",
"(",
"self",
")",
":",
"on_command",
"=",
"StandardSend",
"(",
"self",
".",
"_address",
",",
"COMMAND_LIGHT_ON_0X11_NONE",
",",
"0xff",
")",
"self",
".",
"_send_method",
"(",
"on_command",
",",
"self",
".",
"_on_message_received",
")"
] | Send ON command to device. | [
"Send",
"ON",
"command",
"to",
"device",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L196-L201 | train | 39,400 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffSwitch_OutletTop.off | def off(self):
"""Send OFF command to device."""
self._send_method(StandardSend(self._address,
COMMAND_LIGHT_OFF_0X13_0X00),
self._off_message_received) | python | def off(self):
"""Send OFF command to device."""
self._send_method(StandardSend(self._address,
COMMAND_LIGHT_OFF_0X13_0X00),
self._off_message_received) | [
"def",
"off",
"(",
"self",
")",
":",
"self",
".",
"_send_method",
"(",
"StandardSend",
"(",
"self",
".",
"_address",
",",
"COMMAND_LIGHT_OFF_0X13_0X00",
")",
",",
"self",
".",
"_off_message_received",
")"
] | Send OFF command to device. | [
"Send",
"OFF",
"command",
"to",
"device",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L241-L245 | train | 39,401 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffSwitch_OutletTop._send_status_0x01_request | def _send_status_0x01_request(self):
"""Sent status request to device."""
status_command = StandardSend(self._address,
COMMAND_LIGHT_STATUS_REQUEST_0X19_0X01)
self._send_method(status_command, self._status_message_0x01_received) | python | def _send_status_0x01_request(self):
"""Sent status request to device."""
status_command = StandardSend(self._address,
COMMAND_LIGHT_STATUS_REQUEST_0X19_0X01)
self._send_method(status_command, self._status_message_0x01_received) | [
"def",
"_send_status_0x01_request",
"(",
"self",
")",
":",
"status_command",
"=",
"StandardSend",
"(",
"self",
".",
"_address",
",",
"COMMAND_LIGHT_STATUS_REQUEST_0X19_0X01",
")",
"self",
".",
"_send_method",
"(",
"status_command",
",",
"self",
".",
"_status_message_0... | Sent status request to device. | [
"Sent",
"status",
"request",
"to",
"device",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L247-L251 | train | 39,402 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffSwitch_OutletTop._status_message_0x01_received | def _status_message_0x01_received(self, msg):
"""Handle status received messages.
The following status values can be received:
0x00 = Both Outlets Off
0x01 = Only Top Outlet On
0x02 = Only Bottom Outlet On
0x03 = Both Outlets On
"""
if msg.cmd2 == 0x00 or msg.cmd2 == 0x02:
self._update_subscribers(0x00)
elif msg.cmd2 == 0x01 or msg.cmd2 == 0x03:
self._update_subscribers(0xff)
else:
raise ValueError | python | def _status_message_0x01_received(self, msg):
"""Handle status received messages.
The following status values can be received:
0x00 = Both Outlets Off
0x01 = Only Top Outlet On
0x02 = Only Bottom Outlet On
0x03 = Both Outlets On
"""
if msg.cmd2 == 0x00 or msg.cmd2 == 0x02:
self._update_subscribers(0x00)
elif msg.cmd2 == 0x01 or msg.cmd2 == 0x03:
self._update_subscribers(0xff)
else:
raise ValueError | [
"def",
"_status_message_0x01_received",
"(",
"self",
",",
"msg",
")",
":",
"if",
"msg",
".",
"cmd2",
"==",
"0x00",
"or",
"msg",
".",
"cmd2",
"==",
"0x02",
":",
"self",
".",
"_update_subscribers",
"(",
"0x00",
")",
"elif",
"msg",
".",
"cmd2",
"==",
"0x0... | Handle status received messages.
The following status values can be received:
0x00 = Both Outlets Off
0x01 = Only Top Outlet On
0x02 = Only Bottom Outlet On
0x03 = Both Outlets On | [
"Handle",
"status",
"received",
"messages",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L253-L267 | train | 39,403 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffSwitch_OutletBottom.on | def on(self):
"""Send an ON message to device group."""
on_command = ExtendedSend(self._address,
COMMAND_LIGHT_ON_0X11_NONE,
self._udata,
cmd2=0xff)
on_command.set_checksum()
self._send_method(on_command, self._on_message_received) | python | def on(self):
"""Send an ON message to device group."""
on_command = ExtendedSend(self._address,
COMMAND_LIGHT_ON_0X11_NONE,
self._udata,
cmd2=0xff)
on_command.set_checksum()
self._send_method(on_command, self._on_message_received) | [
"def",
"on",
"(",
"self",
")",
":",
"on_command",
"=",
"ExtendedSend",
"(",
"self",
".",
"_address",
",",
"COMMAND_LIGHT_ON_0X11_NONE",
",",
"self",
".",
"_udata",
",",
"cmd2",
"=",
"0xff",
")",
"on_command",
".",
"set_checksum",
"(",
")",
"self",
".",
"... | Send an ON message to device group. | [
"Send",
"an",
"ON",
"message",
"to",
"device",
"group",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L295-L302 | train | 39,404 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffSwitch_OutletBottom.off | def off(self):
"""Send an OFF message to device group."""
off_command = ExtendedSend(self._address,
COMMAND_LIGHT_OFF_0X13_0X00,
self._udata)
off_command.set_checksum()
self._send_method(off_command, self._off_message_received) | python | def off(self):
"""Send an OFF message to device group."""
off_command = ExtendedSend(self._address,
COMMAND_LIGHT_OFF_0X13_0X00,
self._udata)
off_command.set_checksum()
self._send_method(off_command, self._off_message_received) | [
"def",
"off",
"(",
"self",
")",
":",
"off_command",
"=",
"ExtendedSend",
"(",
"self",
".",
"_address",
",",
"COMMAND_LIGHT_OFF_0X13_0X00",
",",
"self",
".",
"_udata",
")",
"off_command",
".",
"set_checksum",
"(",
")",
"self",
".",
"_send_method",
"(",
"off_c... | Send an OFF message to device group. | [
"Send",
"an",
"OFF",
"message",
"to",
"device",
"group",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L304-L310 | train | 39,405 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffSwitch_OutletBottom._send_status_0x01_request | def _send_status_0x01_request(self):
"""Send a status request."""
status_command = StandardSend(self._address,
COMMAND_LIGHT_STATUS_REQUEST_0X19_0X01)
self._send_method(status_command, self._status_message_received) | python | def _send_status_0x01_request(self):
"""Send a status request."""
status_command = StandardSend(self._address,
COMMAND_LIGHT_STATUS_REQUEST_0X19_0X01)
self._send_method(status_command, self._status_message_received) | [
"def",
"_send_status_0x01_request",
"(",
"self",
")",
":",
"status_command",
"=",
"StandardSend",
"(",
"self",
".",
"_address",
",",
"COMMAND_LIGHT_STATUS_REQUEST_0X19_0X01",
")",
"self",
".",
"_send_method",
"(",
"status_command",
",",
"self",
".",
"_status_message_r... | Send a status request. | [
"Send",
"a",
"status",
"request",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L312-L316 | train | 39,406 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffSwitch_OutletBottom._status_message_received | def _status_message_received(self, msg):
"""Receive a status message.
The following status values can be received:
0x00 = Both Outlets Off
0x01 = Only Top Outlet On
0x02 = Only Bottom Outlet On
0x03 = Both Outlets On
"""
if msg.cmd2 == 0x00 or msg.cmd2 == 0x01:
self._update_subscribers(0x00)
elif msg.cmd2 == 0x02 or msg.cmd2 == 0x03:
self._update_subscribers(0xff)
else:
raise ValueError | python | def _status_message_received(self, msg):
"""Receive a status message.
The following status values can be received:
0x00 = Both Outlets Off
0x01 = Only Top Outlet On
0x02 = Only Bottom Outlet On
0x03 = Both Outlets On
"""
if msg.cmd2 == 0x00 or msg.cmd2 == 0x01:
self._update_subscribers(0x00)
elif msg.cmd2 == 0x02 or msg.cmd2 == 0x03:
self._update_subscribers(0xff)
else:
raise ValueError | [
"def",
"_status_message_received",
"(",
"self",
",",
"msg",
")",
":",
"if",
"msg",
".",
"cmd2",
"==",
"0x00",
"or",
"msg",
".",
"cmd2",
"==",
"0x01",
":",
"self",
".",
"_update_subscribers",
"(",
"0x00",
")",
"elif",
"msg",
".",
"cmd2",
"==",
"0x02",
... | Receive a status message.
The following status values can be received:
0x00 = Both Outlets Off
0x01 = Only Top Outlet On
0x02 = Only Bottom Outlet On
0x03 = Both Outlets On | [
"Receive",
"a",
"status",
"message",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L318-L332 | train | 39,407 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OpenClosedRelay.close | def close(self):
"""Send CLOSE command to device."""
close_command = StandardSend(self._address,
COMMAND_LIGHT_OFF_0X13_0X00)
self._send_method(close_command, self._close_message_received) | python | def close(self):
"""Send CLOSE command to device."""
close_command = StandardSend(self._address,
COMMAND_LIGHT_OFF_0X13_0X00)
self._send_method(close_command, self._close_message_received) | [
"def",
"close",
"(",
"self",
")",
":",
"close_command",
"=",
"StandardSend",
"(",
"self",
".",
"_address",
",",
"COMMAND_LIGHT_OFF_0X13_0X00",
")",
"self",
".",
"_send_method",
"(",
"close_command",
",",
"self",
".",
"_close_message_received",
")"
] | Send CLOSE command to device. | [
"Send",
"CLOSE",
"command",
"to",
"device",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L365-L369 | train | 39,408 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypad.led_changed | def led_changed(self, addr, group, val):
"""Capture a change to the LED for this button."""
_LOGGER.debug("Button %d LED changed from %d to %d",
self._group, self._value, val)
led_on = bool(val)
if led_on != bool(self._value):
self._update_subscribers(int(led_on)) | python | def led_changed(self, addr, group, val):
"""Capture a change to the LED for this button."""
_LOGGER.debug("Button %d LED changed from %d to %d",
self._group, self._value, val)
led_on = bool(val)
if led_on != bool(self._value):
self._update_subscribers(int(led_on)) | [
"def",
"led_changed",
"(",
"self",
",",
"addr",
",",
"group",
",",
"val",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Button %d LED changed from %d to %d\"",
",",
"self",
".",
"_group",
",",
"self",
".",
"_value",
",",
"val",
")",
"led_on",
"=",
"bool",
"... | Capture a change to the LED for this button. | [
"Capture",
"a",
"change",
"to",
"the",
"LED",
"for",
"this",
"button",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L526-L532 | train | 39,409 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypad._status_message_received | def _status_message_received(self, msg):
"""Receive confirmation that the status message is coming.
The real status message is the extended direct message.
"""
if not self._status_received:
asyncio.ensure_future(self._confirm_status_received(),
loop=self._loop) | python | def _status_message_received(self, msg):
"""Receive confirmation that the status message is coming.
The real status message is the extended direct message.
"""
if not self._status_received:
asyncio.ensure_future(self._confirm_status_received(),
loop=self._loop) | [
"def",
"_status_message_received",
"(",
"self",
",",
"msg",
")",
":",
"if",
"not",
"self",
".",
"_status_received",
":",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_confirm_status_received",
"(",
")",
",",
"loop",
"=",
"self",
".",
"_loop",
")"
] | Receive confirmation that the status message is coming.
The real status message is the extended direct message. | [
"Receive",
"confirmation",
"that",
"the",
"status",
"message",
"is",
"coming",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L671-L678 | train | 39,410 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypad._status_extended_message_received | def _status_extended_message_received(self, msg):
"""Receeive an extended status message.
Status message received:
cmd1: 0x2e
cmd2: 0x00
flags: Direct Extended
d1: group
d2: 0x01
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask
d14: Check sum
"""
self._status_received = True
self._status_retries = 0
_LOGGER.debug("Extended status message received")
if self._status_response_lock.locked():
self._status_response_lock.release()
user_data = msg.userdata
# self._update_subscribers(user_data['d8'])
self._set_status_data(user_data) | python | def _status_extended_message_received(self, msg):
"""Receeive an extended status message.
Status message received:
cmd1: 0x2e
cmd2: 0x00
flags: Direct Extended
d1: group
d2: 0x01
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask
d14: Check sum
"""
self._status_received = True
self._status_retries = 0
_LOGGER.debug("Extended status message received")
if self._status_response_lock.locked():
self._status_response_lock.release()
user_data = msg.userdata
# self._update_subscribers(user_data['d8'])
self._set_status_data(user_data) | [
"def",
"_status_extended_message_received",
"(",
"self",
",",
"msg",
")",
":",
"self",
".",
"_status_received",
"=",
"True",
"self",
".",
"_status_retries",
"=",
"0",
"_LOGGER",
".",
"debug",
"(",
"\"Extended status message received\"",
")",
"if",
"self",
".",
"... | Receeive an extended status message.
Status message received:
cmd1: 0x2e
cmd2: 0x00
flags: Direct Extended
d1: group
d2: 0x01
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask
d14: Check sum | [
"Receeive",
"an",
"extended",
"status",
"message",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L703-L732 | train | 39,411 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypad._property_set | def _property_set(self, msg):
"""Set command received and acknowledged."""
prop = self._sent_property.get('prop')
if prop and hasattr(self, prop):
setattr(self, prop, self._sent_property.get('val'))
self._sent_property = {} | python | def _property_set(self, msg):
"""Set command received and acknowledged."""
prop = self._sent_property.get('prop')
if prop and hasattr(self, prop):
setattr(self, prop, self._sent_property.get('val'))
self._sent_property = {} | [
"def",
"_property_set",
"(",
"self",
",",
"msg",
")",
":",
"prop",
"=",
"self",
".",
"_sent_property",
".",
"get",
"(",
"'prop'",
")",
"if",
"prop",
"and",
"hasattr",
"(",
"self",
",",
"prop",
")",
":",
"setattr",
"(",
"self",
",",
"prop",
",",
"se... | Set command received and acknowledged. | [
"Set",
"command",
"received",
"and",
"acknowledged",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L734-L739 | train | 39,412 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypad._set_status_data | def _set_status_data(self, userdata):
"""Set status properties from userdata response.
Response values:
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask
"""
self._on_mask = userdata['d3']
self._off_mask = userdata['d4']
self._x10_house_code = userdata['d5']
self._x10_unit = userdata['d6']
self._ramp_rate = userdata['d7']
self._on_level = userdata['d8']
self._led_brightness = userdata['d9']
self._non_toggle_mask = userdata['d10']
self._led_bit_mask = userdata['d11']
self._x10_all_bit_mask = userdata['d12']
self._on_off_bit_mask = userdata['d13']
self._trigger_group_bit_mask = userdata['d14'] | python | def _set_status_data(self, userdata):
"""Set status properties from userdata response.
Response values:
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask
"""
self._on_mask = userdata['d3']
self._off_mask = userdata['d4']
self._x10_house_code = userdata['d5']
self._x10_unit = userdata['d6']
self._ramp_rate = userdata['d7']
self._on_level = userdata['d8']
self._led_brightness = userdata['d9']
self._non_toggle_mask = userdata['d10']
self._led_bit_mask = userdata['d11']
self._x10_all_bit_mask = userdata['d12']
self._on_off_bit_mask = userdata['d13']
self._trigger_group_bit_mask = userdata['d14'] | [
"def",
"_set_status_data",
"(",
"self",
",",
"userdata",
")",
":",
"self",
".",
"_on_mask",
"=",
"userdata",
"[",
"'d3'",
"]",
"self",
".",
"_off_mask",
"=",
"userdata",
"[",
"'d4'",
"]",
"self",
".",
"_x10_house_code",
"=",
"userdata",
"[",
"'d5'",
"]",... | Set status properties from userdata response.
Response values:
d3: On Mask
d4: Off Mask
d5: X10 House Code
d6: X10 Unit
d7: Ramp Rate
d8: On-Level
d9: LED Brightness
d10: Non-Toggle Mask
d11: LED Bit Mask
d12: X10 ALL Bit Mask
d13: On/Off Bit Mask | [
"Set",
"status",
"properties",
"from",
"userdata",
"response",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L751-L778 | train | 39,413 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypad._create_set_property_msg | def _create_set_property_msg(self, prop, cmd, val):
"""Create an extended message to set a property.
Create an extended message with:
cmd1: 0x2e
cmd2: 0x00
flags: Direct Extended
d1: group
d2: cmd
d3: val
d4 - d14: 0x00
Parameters:
prop: Property name to update
cmd: Command value
0x02: on mask
0x03: off mask
0x04: x10 house code
0x05: ramp rate
0x06: on level
0x07: LED brightness
0x08: Non-Toggle mask
0x09: LED bit mask (Do not use in this class. Use LED class)
0x0a: X10 All bit mask
0x0c: Trigger group bit mask
val: New property value
"""
user_data = Userdata({'d1': self.group,
'd2': cmd,
'd3': val})
msg = ExtendedSend(self._address,
COMMAND_EXTENDED_GET_SET_0X2E_0X00,
user_data)
msg.set_checksum()
self._set_sent_property(prop, val)
return msg | python | def _create_set_property_msg(self, prop, cmd, val):
"""Create an extended message to set a property.
Create an extended message with:
cmd1: 0x2e
cmd2: 0x00
flags: Direct Extended
d1: group
d2: cmd
d3: val
d4 - d14: 0x00
Parameters:
prop: Property name to update
cmd: Command value
0x02: on mask
0x03: off mask
0x04: x10 house code
0x05: ramp rate
0x06: on level
0x07: LED brightness
0x08: Non-Toggle mask
0x09: LED bit mask (Do not use in this class. Use LED class)
0x0a: X10 All bit mask
0x0c: Trigger group bit mask
val: New property value
"""
user_data = Userdata({'d1': self.group,
'd2': cmd,
'd3': val})
msg = ExtendedSend(self._address,
COMMAND_EXTENDED_GET_SET_0X2E_0X00,
user_data)
msg.set_checksum()
self._set_sent_property(prop, val)
return msg | [
"def",
"_create_set_property_msg",
"(",
"self",
",",
"prop",
",",
"cmd",
",",
"val",
")",
":",
"user_data",
"=",
"Userdata",
"(",
"{",
"'d1'",
":",
"self",
".",
"group",
",",
"'d2'",
":",
"cmd",
",",
"'d3'",
":",
"val",
"}",
")",
"msg",
"=",
"Exten... | Create an extended message to set a property.
Create an extended message with:
cmd1: 0x2e
cmd2: 0x00
flags: Direct Extended
d1: group
d2: cmd
d3: val
d4 - d14: 0x00
Parameters:
prop: Property name to update
cmd: Command value
0x02: on mask
0x03: off mask
0x04: x10 house code
0x05: ramp rate
0x06: on level
0x07: LED brightness
0x08: Non-Toggle mask
0x09: LED bit mask (Do not use in this class. Use LED class)
0x0a: X10 All bit mask
0x0c: Trigger group bit mask
val: New property value | [
"Create",
"an",
"extended",
"message",
"to",
"set",
"a",
"property",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L792-L828 | train | 39,414 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypadLed.on | def on(self, group):
"""Turn the LED on for a group."""
asyncio.ensure_future(self._send_led_on_off_request(group, 1),
loop=self._loop) | python | def on(self, group):
"""Turn the LED on for a group."""
asyncio.ensure_future(self._send_led_on_off_request(group, 1),
loop=self._loop) | [
"def",
"on",
"(",
"self",
",",
"group",
")",
":",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_send_led_on_off_request",
"(",
"group",
",",
"1",
")",
",",
"loop",
"=",
"self",
".",
"_loop",
")"
] | Turn the LED on for a group. | [
"Turn",
"the",
"LED",
"on",
"for",
"a",
"group",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L852-L855 | train | 39,415 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypadLed.off | def off(self, group):
"""Turn the LED off for a group."""
asyncio.ensure_future(self._send_led_on_off_request(group, 0),
loop=self._loop) | python | def off(self, group):
"""Turn the LED off for a group."""
asyncio.ensure_future(self._send_led_on_off_request(group, 0),
loop=self._loop) | [
"def",
"off",
"(",
"self",
",",
"group",
")",
":",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_send_led_on_off_request",
"(",
"group",
",",
"0",
")",
",",
"loop",
"=",
"self",
".",
"_loop",
")"
] | Turn the LED off for a group. | [
"Turn",
"the",
"LED",
"off",
"for",
"a",
"group",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L857-L860 | train | 39,416 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypadLed.register_led_updates | def register_led_updates(self, callback, button):
"""Register a callback when a specific button LED changes."""
button_callbacks = self._button_observer_callbacks.get(button)
if not button_callbacks:
self._button_observer_callbacks[button] = []
_LOGGER.debug('New callback for button %d', button)
self._button_observer_callbacks[button].append(callback) | python | def register_led_updates(self, callback, button):
"""Register a callback when a specific button LED changes."""
button_callbacks = self._button_observer_callbacks.get(button)
if not button_callbacks:
self._button_observer_callbacks[button] = []
_LOGGER.debug('New callback for button %d', button)
self._button_observer_callbacks[button].append(callback) | [
"def",
"register_led_updates",
"(",
"self",
",",
"callback",
",",
"button",
")",
":",
"button_callbacks",
"=",
"self",
".",
"_button_observer_callbacks",
".",
"get",
"(",
"button",
")",
"if",
"not",
"button_callbacks",
":",
"self",
".",
"_button_observer_callbacks... | Register a callback when a specific button LED changes. | [
"Register",
"a",
"callback",
"when",
"a",
"specific",
"button",
"LED",
"changes",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L877-L883 | train | 39,417 |
nugget/python-insteonplm | insteonplm/states/onOff.py | OnOffKeypadLed._set_led_value | def _set_led_value(self, group, val):
"""Set the LED value and confirm with a status check."""
new_bitmask = set_bit(self._value, group, bool(val))
self._set_led_bitmask(new_bitmask) | python | def _set_led_value(self, group, val):
"""Set the LED value and confirm with a status check."""
new_bitmask = set_bit(self._value, group, bool(val))
self._set_led_bitmask(new_bitmask) | [
"def",
"_set_led_value",
"(",
"self",
",",
"group",
",",
"val",
")",
":",
"new_bitmask",
"=",
"set_bit",
"(",
"self",
".",
"_value",
",",
"group",
",",
"bool",
"(",
"val",
")",
")",
"self",
".",
"_set_led_bitmask",
"(",
"new_bitmask",
")"
] | Set the LED value and confirm with a status check. | [
"Set",
"the",
"LED",
"value",
"and",
"confirm",
"with",
"a",
"status",
"check",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L916-L919 | train | 39,418 |
nugget/python-insteonplm | insteonplm/states/thermostat.py | CoolSetPoint.set | def set(self, val):
"""Set the cool set point."""
msg = ExtendedSend(
address=self._address,
commandtuple=COMMAND_THERMOSTAT_SET_COOL_SETPOINT_0X6C_NONE,
cmd2=int(val * 2),
userdata=Userdata())
msg.set_checksum()
self._send_method(msg, self._set_cool_point_ack) | python | def set(self, val):
"""Set the cool set point."""
msg = ExtendedSend(
address=self._address,
commandtuple=COMMAND_THERMOSTAT_SET_COOL_SETPOINT_0X6C_NONE,
cmd2=int(val * 2),
userdata=Userdata())
msg.set_checksum()
self._send_method(msg, self._set_cool_point_ack) | [
"def",
"set",
"(",
"self",
",",
"val",
")",
":",
"msg",
"=",
"ExtendedSend",
"(",
"address",
"=",
"self",
".",
"_address",
",",
"commandtuple",
"=",
"COMMAND_THERMOSTAT_SET_COOL_SETPOINT_0X6C_NONE",
",",
"cmd2",
"=",
"int",
"(",
"val",
"*",
"2",
")",
",",
... | Set the cool set point. | [
"Set",
"the",
"cool",
"set",
"point",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/thermostat.py#L363-L371 | train | 39,419 |
nugget/python-insteonplm | insteonplm/states/thermostat.py | HeatSetPoint.set | def set(self, val):
"""Set the heat set point."""
msg = ExtendedSend(
address=self._address,
commandtuple=COMMAND_THERMOSTAT_SET_HEAT_SETPOINT_0X6D_NONE,
cmd2=int(val * 2),
userdata=Userdata())
msg.set_checksum()
self._send_method(msg, self._set_heat_point_ack) | python | def set(self, val):
"""Set the heat set point."""
msg = ExtendedSend(
address=self._address,
commandtuple=COMMAND_THERMOSTAT_SET_HEAT_SETPOINT_0X6D_NONE,
cmd2=int(val * 2),
userdata=Userdata())
msg.set_checksum()
self._send_method(msg, self._set_heat_point_ack) | [
"def",
"set",
"(",
"self",
",",
"val",
")",
":",
"msg",
"=",
"ExtendedSend",
"(",
"address",
"=",
"self",
".",
"_address",
",",
"commandtuple",
"=",
"COMMAND_THERMOSTAT_SET_HEAT_SETPOINT_0X6D_NONE",
",",
"cmd2",
"=",
"int",
"(",
"val",
"*",
"2",
")",
",",
... | Set the heat set point. | [
"Set",
"the",
"heat",
"set",
"point",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/thermostat.py#L419-L427 | train | 39,420 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices.add_device_callback | def add_device_callback(self, callback):
"""Register a callback to be invoked when a new device appears."""
_LOGGER.debug('Added new callback %s ', callback)
self._cb_new_device.append(callback) | python | def add_device_callback(self, callback):
"""Register a callback to be invoked when a new device appears."""
_LOGGER.debug('Added new callback %s ', callback)
self._cb_new_device.append(callback) | [
"def",
"add_device_callback",
"(",
"self",
",",
"callback",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"'Added new callback %s '",
",",
"callback",
")",
"self",
".",
"_cb_new_device",
".",
"append",
"(",
"callback",
")"
] | Register a callback to be invoked when a new device appears. | [
"Register",
"a",
"callback",
"to",
"be",
"invoked",
"when",
"a",
"new",
"device",
"appears",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L93-L96 | train | 39,421 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices.add_override | def add_override(self, addr, key, value):
"""Register an attribute override for a device."""
address = Address(str(addr)).id
_LOGGER.debug('New override for %s %s is %s', address, key, value)
device_override = self._overrides.get(address, {})
device_override[key] = value
self._overrides[address] = device_override | python | def add_override(self, addr, key, value):
"""Register an attribute override for a device."""
address = Address(str(addr)).id
_LOGGER.debug('New override for %s %s is %s', address, key, value)
device_override = self._overrides.get(address, {})
device_override[key] = value
self._overrides[address] = device_override | [
"def",
"add_override",
"(",
"self",
",",
"addr",
",",
"key",
",",
"value",
")",
":",
"address",
"=",
"Address",
"(",
"str",
"(",
"addr",
")",
")",
".",
"id",
"_LOGGER",
".",
"debug",
"(",
"'New override for %s %s is %s'",
",",
"address",
",",
"key",
",... | Register an attribute override for a device. | [
"Register",
"an",
"attribute",
"override",
"for",
"a",
"device",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L98-L104 | train | 39,422 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices.create_device_from_category | def create_device_from_category(self, plm, addr, cat, subcat,
product_key=0x00):
"""Create a new device from the cat, subcat and product_key data."""
saved_device = self._saved_devices.get(Address(addr).id, {})
cat = saved_device.get('cat', cat)
subcat = saved_device.get('subcat', subcat)
product_key = saved_device.get('product_key', product_key)
device_override = self._overrides.get(Address(addr).id, {})
cat = device_override.get('cat', cat)
subcat = device_override.get('subcat', subcat)
product_key = device_override.get('firmware', product_key)
product_key = device_override.get('product_key', product_key)
return insteonplm.devices.create(plm, addr, cat, subcat, product_key) | python | def create_device_from_category(self, plm, addr, cat, subcat,
product_key=0x00):
"""Create a new device from the cat, subcat and product_key data."""
saved_device = self._saved_devices.get(Address(addr).id, {})
cat = saved_device.get('cat', cat)
subcat = saved_device.get('subcat', subcat)
product_key = saved_device.get('product_key', product_key)
device_override = self._overrides.get(Address(addr).id, {})
cat = device_override.get('cat', cat)
subcat = device_override.get('subcat', subcat)
product_key = device_override.get('firmware', product_key)
product_key = device_override.get('product_key', product_key)
return insteonplm.devices.create(plm, addr, cat, subcat, product_key) | [
"def",
"create_device_from_category",
"(",
"self",
",",
"plm",
",",
"addr",
",",
"cat",
",",
"subcat",
",",
"product_key",
"=",
"0x00",
")",
":",
"saved_device",
"=",
"self",
".",
"_saved_devices",
".",
"get",
"(",
"Address",
"(",
"addr",
")",
".",
"id",... | Create a new device from the cat, subcat and product_key data. | [
"Create",
"a",
"new",
"device",
"from",
"the",
"cat",
"subcat",
"and",
"product_key",
"data",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L106-L120 | train | 39,423 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices.has_saved | def has_saved(self, addr):
"""Test if device has data from the saved data file."""
saved = False
if self._saved_devices.get(addr, None) is not None:
saved = True
return saved | python | def has_saved(self, addr):
"""Test if device has data from the saved data file."""
saved = False
if self._saved_devices.get(addr, None) is not None:
saved = True
return saved | [
"def",
"has_saved",
"(",
"self",
",",
"addr",
")",
":",
"saved",
"=",
"False",
"if",
"self",
".",
"_saved_devices",
".",
"get",
"(",
"addr",
",",
"None",
")",
"is",
"not",
"None",
":",
"saved",
"=",
"True",
"return",
"saved"
] | Test if device has data from the saved data file. | [
"Test",
"if",
"device",
"has",
"data",
"from",
"the",
"saved",
"data",
"file",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L122-L127 | train | 39,424 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices.has_override | def has_override(self, addr):
"""Test if device has data from a device override setting."""
override = False
if self._overrides.get(addr, None) is not None:
override = True
return override | python | def has_override(self, addr):
"""Test if device has data from a device override setting."""
override = False
if self._overrides.get(addr, None) is not None:
override = True
return override | [
"def",
"has_override",
"(",
"self",
",",
"addr",
")",
":",
"override",
"=",
"False",
"if",
"self",
".",
"_overrides",
".",
"get",
"(",
"addr",
",",
"None",
")",
"is",
"not",
"None",
":",
"override",
"=",
"True",
"return",
"override"
] | Test if device has data from a device override setting. | [
"Test",
"if",
"device",
"has",
"data",
"from",
"a",
"device",
"override",
"setting",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L129-L134 | train | 39,425 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices.add_known_devices | def add_known_devices(self, plm):
"""Add devices from the saved devices or from the device overrides."""
from insteonplm.devices import ALDBStatus
for addr in self._saved_devices:
if not self._devices.get(addr):
saved_device = self._saved_devices.get(Address(addr).id, {})
cat = saved_device.get('cat')
subcat = saved_device.get('subcat')
product_key = saved_device.get('firmware')
product_key = saved_device.get('product_key', product_key)
device = self.create_device_from_category(
plm, addr, cat, subcat, product_key)
if device:
_LOGGER.debug('Device with id %s added to device list '
'from saved device data.', addr)
aldb_status = saved_device.get('aldb_status', 0)
device.aldb.status = ALDBStatus(aldb_status)
aldb = saved_device.get('aldb', {})
device.aldb.load_saved_records(aldb_status, aldb)
self[addr] = device
for addr in self._overrides:
if not self._devices.get(addr):
device_override = self._overrides.get(Address(addr).id, {})
cat = device_override.get('cat')
subcat = device_override.get('subcat')
product_key = device_override.get('firmware')
product_key = device_override.get('product_key', product_key)
device = self.create_device_from_category(
plm, addr, cat, subcat, product_key)
if device:
_LOGGER.debug('Device with id %s added to device list '
'from device override data.', addr)
self[addr] = device | python | def add_known_devices(self, plm):
"""Add devices from the saved devices or from the device overrides."""
from insteonplm.devices import ALDBStatus
for addr in self._saved_devices:
if not self._devices.get(addr):
saved_device = self._saved_devices.get(Address(addr).id, {})
cat = saved_device.get('cat')
subcat = saved_device.get('subcat')
product_key = saved_device.get('firmware')
product_key = saved_device.get('product_key', product_key)
device = self.create_device_from_category(
plm, addr, cat, subcat, product_key)
if device:
_LOGGER.debug('Device with id %s added to device list '
'from saved device data.', addr)
aldb_status = saved_device.get('aldb_status', 0)
device.aldb.status = ALDBStatus(aldb_status)
aldb = saved_device.get('aldb', {})
device.aldb.load_saved_records(aldb_status, aldb)
self[addr] = device
for addr in self._overrides:
if not self._devices.get(addr):
device_override = self._overrides.get(Address(addr).id, {})
cat = device_override.get('cat')
subcat = device_override.get('subcat')
product_key = device_override.get('firmware')
product_key = device_override.get('product_key', product_key)
device = self.create_device_from_category(
plm, addr, cat, subcat, product_key)
if device:
_LOGGER.debug('Device with id %s added to device list '
'from device override data.', addr)
self[addr] = device | [
"def",
"add_known_devices",
"(",
"self",
",",
"plm",
")",
":",
"from",
"insteonplm",
".",
"devices",
"import",
"ALDBStatus",
"for",
"addr",
"in",
"self",
".",
"_saved_devices",
":",
"if",
"not",
"self",
".",
"_devices",
".",
"get",
"(",
"addr",
")",
":",... | Add devices from the saved devices or from the device overrides. | [
"Add",
"devices",
"from",
"the",
"saved",
"devices",
"or",
"from",
"the",
"device",
"overrides",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L136-L168 | train | 39,426 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices.save_device_info | def save_device_info(self):
"""Save all device information to the device info file."""
if self._workdir is not None:
devices = []
for addr in self._devices:
device = self._devices.get(addr)
if not device.address.is_x10:
aldb = {}
for mem in device.aldb:
rec = device.aldb[mem]
if rec:
aldbRec = {'memory': mem,
'control_flags': rec.control_flags.byte,
'group': rec.group,
'address': rec.address.id,
'data1': rec.data1,
'data2': rec.data2,
'data3': rec.data3}
aldb[mem] = aldbRec
deviceInfo = {'address': device.address.id,
'cat': device.cat,
'subcat': device.subcat,
'product_key': device.product_key,
'aldb_status': device.aldb.status.value,
'aldb': aldb}
devices.append(deviceInfo)
asyncio.ensure_future(self._write_saved_device_info(devices),
loop=self._loop) | python | def save_device_info(self):
"""Save all device information to the device info file."""
if self._workdir is not None:
devices = []
for addr in self._devices:
device = self._devices.get(addr)
if not device.address.is_x10:
aldb = {}
for mem in device.aldb:
rec = device.aldb[mem]
if rec:
aldbRec = {'memory': mem,
'control_flags': rec.control_flags.byte,
'group': rec.group,
'address': rec.address.id,
'data1': rec.data1,
'data2': rec.data2,
'data3': rec.data3}
aldb[mem] = aldbRec
deviceInfo = {'address': device.address.id,
'cat': device.cat,
'subcat': device.subcat,
'product_key': device.product_key,
'aldb_status': device.aldb.status.value,
'aldb': aldb}
devices.append(deviceInfo)
asyncio.ensure_future(self._write_saved_device_info(devices),
loop=self._loop) | [
"def",
"save_device_info",
"(",
"self",
")",
":",
"if",
"self",
".",
"_workdir",
"is",
"not",
"None",
":",
"devices",
"=",
"[",
"]",
"for",
"addr",
"in",
"self",
".",
"_devices",
":",
"device",
"=",
"self",
".",
"_devices",
".",
"get",
"(",
"addr",
... | Save all device information to the device info file. | [
"Save",
"all",
"device",
"information",
"to",
"the",
"device",
"info",
"file",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L171-L198 | train | 39,427 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices._add_saved_device_info | def _add_saved_device_info(self, **kwarg):
"""Register device info from the saved data file."""
addr = kwarg.get('address')
_LOGGER.debug('Found saved device with address %s', addr)
self._saved_devices[addr] = kwarg | python | def _add_saved_device_info(self, **kwarg):
"""Register device info from the saved data file."""
addr = kwarg.get('address')
_LOGGER.debug('Found saved device with address %s', addr)
self._saved_devices[addr] = kwarg | [
"def",
"_add_saved_device_info",
"(",
"self",
",",
"*",
"*",
"kwarg",
")",
":",
"addr",
"=",
"kwarg",
".",
"get",
"(",
"'address'",
")",
"_LOGGER",
".",
"debug",
"(",
"'Found saved device with address %s'",
",",
"addr",
")",
"self",
".",
"_saved_devices",
"[... | Register device info from the saved data file. | [
"Register",
"device",
"info",
"from",
"the",
"saved",
"data",
"file",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L200-L204 | train | 39,428 |
nugget/python-insteonplm | insteonplm/linkedDevices.py | LinkedDevices.load_saved_device_info | async def load_saved_device_info(self):
"""Load device information from the device info file."""
_LOGGER.debug("Loading saved device info.")
deviceinfo = []
if self._workdir:
_LOGGER.debug("Really Loading saved device info.")
try:
device_file = '{}/{}'.format(self._workdir, DEVICE_INFO_FILE)
with open(device_file, 'r') as infile:
try:
deviceinfo = json.load(infile)
_LOGGER.debug("Saved device file loaded")
except json.decoder.JSONDecodeError:
_LOGGER.debug("Loading saved device file failed")
except FileNotFoundError:
_LOGGER.debug("Saved device file not found")
for device in deviceinfo:
self._add_saved_device_info(**device) | python | async def load_saved_device_info(self):
"""Load device information from the device info file."""
_LOGGER.debug("Loading saved device info.")
deviceinfo = []
if self._workdir:
_LOGGER.debug("Really Loading saved device info.")
try:
device_file = '{}/{}'.format(self._workdir, DEVICE_INFO_FILE)
with open(device_file, 'r') as infile:
try:
deviceinfo = json.load(infile)
_LOGGER.debug("Saved device file loaded")
except json.decoder.JSONDecodeError:
_LOGGER.debug("Loading saved device file failed")
except FileNotFoundError:
_LOGGER.debug("Saved device file not found")
for device in deviceinfo:
self._add_saved_device_info(**device) | [
"async",
"def",
"load_saved_device_info",
"(",
"self",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Loading saved device info.\"",
")",
"deviceinfo",
"=",
"[",
"]",
"if",
"self",
".",
"_workdir",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Really Loading saved device info... | Load device information from the device info file. | [
"Load",
"device",
"information",
"from",
"the",
"device",
"info",
"file",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/linkedDevices.py#L206-L223 | train | 39,429 |
nugget/python-insteonplm | insteonplm/messages/x10received.py | X10Received.unit_code_msg | def unit_code_msg(housecode, unitcode):
"""Create an X10 message to send the house code and unit code."""
house_byte = 0
unit_byte = 0
if isinstance(housecode, str):
house_byte = insteonplm.utils.housecode_to_byte(housecode) << 4
unit_byte = insteonplm.utils.unitcode_to_byte(unitcode)
elif isinstance(housecode, int) and housecode < 16:
house_byte = housecode << 4
unit_byte = unitcode
else:
house_byte = housecode
unit_byte = unitcode
return X10Received(house_byte + unit_byte, 0x00) | python | def unit_code_msg(housecode, unitcode):
"""Create an X10 message to send the house code and unit code."""
house_byte = 0
unit_byte = 0
if isinstance(housecode, str):
house_byte = insteonplm.utils.housecode_to_byte(housecode) << 4
unit_byte = insteonplm.utils.unitcode_to_byte(unitcode)
elif isinstance(housecode, int) and housecode < 16:
house_byte = housecode << 4
unit_byte = unitcode
else:
house_byte = housecode
unit_byte = unitcode
return X10Received(house_byte + unit_byte, 0x00) | [
"def",
"unit_code_msg",
"(",
"housecode",
",",
"unitcode",
")",
":",
"house_byte",
"=",
"0",
"unit_byte",
"=",
"0",
"if",
"isinstance",
"(",
"housecode",
",",
"str",
")",
":",
"house_byte",
"=",
"insteonplm",
".",
"utils",
".",
"housecode_to_byte",
"(",
"h... | Create an X10 message to send the house code and unit code. | [
"Create",
"an",
"X10",
"message",
"to",
"send",
"the",
"house",
"code",
"and",
"unit",
"code",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/messages/x10received.py#L40-L53 | train | 39,430 |
nugget/python-insteonplm | insteonplm/messages/x10received.py | X10Received.command_msg | def command_msg(housecode, command):
"""Create an X10 message to send the house code and a command code."""
house_byte = 0
if isinstance(housecode, str):
house_byte = insteonplm.utils.housecode_to_byte(housecode) << 4
elif isinstance(housecode, int) and housecode < 16:
house_byte = housecode << 4
else:
house_byte = housecode
return X10Received(house_byte + command, 0x80) | python | def command_msg(housecode, command):
"""Create an X10 message to send the house code and a command code."""
house_byte = 0
if isinstance(housecode, str):
house_byte = insteonplm.utils.housecode_to_byte(housecode) << 4
elif isinstance(housecode, int) and housecode < 16:
house_byte = housecode << 4
else:
house_byte = housecode
return X10Received(house_byte + command, 0x80) | [
"def",
"command_msg",
"(",
"housecode",
",",
"command",
")",
":",
"house_byte",
"=",
"0",
"if",
"isinstance",
"(",
"housecode",
",",
"str",
")",
":",
"house_byte",
"=",
"insteonplm",
".",
"utils",
".",
"housecode_to_byte",
"(",
"housecode",
")",
"<<",
"4",... | Create an X10 message to send the house code and a command code. | [
"Create",
"an",
"X10",
"message",
"to",
"send",
"the",
"house",
"code",
"and",
"a",
"command",
"code",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/messages/x10received.py#L56-L65 | train | 39,431 |
nugget/python-insteonplm | insteonplm/__init__.py | create_http_connection | async def create_http_connection(loop, protocol_factory, host, port=25105,
auth=None):
"""Create an HTTP session used to connect to the Insteon Hub."""
protocol = protocol_factory()
transport = HttpTransport(loop, protocol, host, port, auth)
_LOGGER.debug("create_http_connection Finished creating connection")
return (transport, protocol) | python | async def create_http_connection(loop, protocol_factory, host, port=25105,
auth=None):
"""Create an HTTP session used to connect to the Insteon Hub."""
protocol = protocol_factory()
transport = HttpTransport(loop, protocol, host, port, auth)
_LOGGER.debug("create_http_connection Finished creating connection")
return (transport, protocol) | [
"async",
"def",
"create_http_connection",
"(",
"loop",
",",
"protocol_factory",
",",
"host",
",",
"port",
"=",
"25105",
",",
"auth",
"=",
"None",
")",
":",
"protocol",
"=",
"protocol_factory",
"(",
")",
"transport",
"=",
"HttpTransport",
"(",
"loop",
",",
... | Create an HTTP session used to connect to the Insteon Hub. | [
"Create",
"an",
"HTTP",
"session",
"used",
"to",
"connect",
"to",
"the",
"Insteon",
"Hub",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/__init__.py#L21-L27 | train | 39,432 |
nugget/python-insteonplm | insteonplm/__init__.py | Connection.create | async def create(cls, device='/dev/ttyUSB0', host=None,
username=None, password=None, port=25010, hub_version=2,
auto_reconnect=True, loop=None, workdir=None,
poll_devices=True, load_aldb=True):
"""Create a connection to a specific device.
Here is where we supply the device and callback callables we
expect for this PLM class object.
:param device:
Unix device where the PLM is attached
:param address:
IP Address of the Hub
:param username:
User name for connecting to the Hub
:param password:
Password for connecting to the Hub
:param auto_reconnect:
Should the Connection try to automatically reconnect if needed?
:param loop:
asyncio.loop for async operation
:param load_aldb:
Should the ALDB be loaded on connect
:type device:
str
:type auto_reconnect:
boolean
:type loop:
asyncio.loop
:type update_callback:
callable
"""
_LOGGER.debug("Starting Modified Connection.create")
conn = cls(device=device, host=host, username=username,
password=password, port=port, hub_version=hub_version,
loop=loop, retry_interval=1, auto_reconnect=auto_reconnect)
def connection_lost():
"""Respond to Protocol connection lost."""
if conn.auto_reconnect and not conn.closing:
_LOGGER.debug("Reconnecting to transport")
asyncio.ensure_future(conn.reconnect(), loop=conn.loop)
protocol_class = PLM
if conn.host and conn.hub_version == 2:
protocol_class = Hub
conn.protocol = protocol_class(
connection_lost_callback=connection_lost,
loop=conn.loop,
workdir=workdir,
poll_devices=poll_devices,
load_aldb=load_aldb)
await conn.reconnect()
_LOGGER.debug("Ending Connection.create")
return conn | python | async def create(cls, device='/dev/ttyUSB0', host=None,
username=None, password=None, port=25010, hub_version=2,
auto_reconnect=True, loop=None, workdir=None,
poll_devices=True, load_aldb=True):
"""Create a connection to a specific device.
Here is where we supply the device and callback callables we
expect for this PLM class object.
:param device:
Unix device where the PLM is attached
:param address:
IP Address of the Hub
:param username:
User name for connecting to the Hub
:param password:
Password for connecting to the Hub
:param auto_reconnect:
Should the Connection try to automatically reconnect if needed?
:param loop:
asyncio.loop for async operation
:param load_aldb:
Should the ALDB be loaded on connect
:type device:
str
:type auto_reconnect:
boolean
:type loop:
asyncio.loop
:type update_callback:
callable
"""
_LOGGER.debug("Starting Modified Connection.create")
conn = cls(device=device, host=host, username=username,
password=password, port=port, hub_version=hub_version,
loop=loop, retry_interval=1, auto_reconnect=auto_reconnect)
def connection_lost():
"""Respond to Protocol connection lost."""
if conn.auto_reconnect and not conn.closing:
_LOGGER.debug("Reconnecting to transport")
asyncio.ensure_future(conn.reconnect(), loop=conn.loop)
protocol_class = PLM
if conn.host and conn.hub_version == 2:
protocol_class = Hub
conn.protocol = protocol_class(
connection_lost_callback=connection_lost,
loop=conn.loop,
workdir=workdir,
poll_devices=poll_devices,
load_aldb=load_aldb)
await conn.reconnect()
_LOGGER.debug("Ending Connection.create")
return conn | [
"async",
"def",
"create",
"(",
"cls",
",",
"device",
"=",
"'/dev/ttyUSB0'",
",",
"host",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"port",
"=",
"25010",
",",
"hub_version",
"=",
"2",
",",
"auto_reconnect",
"=",
"True... | Create a connection to a specific device.
Here is where we supply the device and callback callables we
expect for this PLM class object.
:param device:
Unix device where the PLM is attached
:param address:
IP Address of the Hub
:param username:
User name for connecting to the Hub
:param password:
Password for connecting to the Hub
:param auto_reconnect:
Should the Connection try to automatically reconnect if needed?
:param loop:
asyncio.loop for async operation
:param load_aldb:
Should the ALDB be loaded on connect
:type device:
str
:type auto_reconnect:
boolean
:type loop:
asyncio.loop
:type update_callback:
callable | [
"Create",
"a",
"connection",
"to",
"a",
"specific",
"device",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/__init__.py#L113-L170 | train | 39,433 |
nugget/python-insteonplm | insteonplm/__init__.py | Connection.reconnect | async def reconnect(self):
"""Reconnect to the modem."""
_LOGGER.debug('starting Connection.reconnect')
await self._connect()
while self._closed:
await self._retry_connection()
_LOGGER.debug('ending Connection.reconnect') | python | async def reconnect(self):
"""Reconnect to the modem."""
_LOGGER.debug('starting Connection.reconnect')
await self._connect()
while self._closed:
await self._retry_connection()
_LOGGER.debug('ending Connection.reconnect') | [
"async",
"def",
"reconnect",
"(",
"self",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"'starting Connection.reconnect'",
")",
"await",
"self",
".",
"_connect",
"(",
")",
"while",
"self",
".",
"_closed",
":",
"await",
"self",
".",
"_retry_connection",
"(",
")",
... | Reconnect to the modem. | [
"Reconnect",
"to",
"the",
"modem",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/__init__.py#L187-L193 | train | 39,434 |
nugget/python-insteonplm | insteonplm/__init__.py | Connection.close | async def close(self, event):
"""Close the PLM device connection and don't try to reconnect."""
_LOGGER.info('Closing connection to Insteon Modem')
self._closing = True
self._auto_reconnect = False
await self.protocol.close()
if self.protocol.transport:
self.protocol.transport.close()
await asyncio.sleep(0, loop=self._loop)
_LOGGER.info('Insteon Modem connection closed') | python | async def close(self, event):
"""Close the PLM device connection and don't try to reconnect."""
_LOGGER.info('Closing connection to Insteon Modem')
self._closing = True
self._auto_reconnect = False
await self.protocol.close()
if self.protocol.transport:
self.protocol.transport.close()
await asyncio.sleep(0, loop=self._loop)
_LOGGER.info('Insteon Modem connection closed') | [
"async",
"def",
"close",
"(",
"self",
",",
"event",
")",
":",
"_LOGGER",
".",
"info",
"(",
"'Closing connection to Insteon Modem'",
")",
"self",
".",
"_closing",
"=",
"True",
"self",
".",
"_auto_reconnect",
"=",
"False",
"await",
"self",
".",
"protocol",
"."... | Close the PLM device connection and don't try to reconnect. | [
"Close",
"the",
"PLM",
"device",
"connection",
"and",
"don",
"t",
"try",
"to",
"reconnect",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/__init__.py#L196-L205 | train | 39,435 |
nugget/python-insteonplm | insteonplm/__init__.py | Connection.dump_conndata | def dump_conndata(self):
"""Developer tool for debugging forensics."""
attrs = vars(self)
return ', '.join("%s: %s" % item for item in attrs.items()) | python | def dump_conndata(self):
"""Developer tool for debugging forensics."""
attrs = vars(self)
return ', '.join("%s: %s" % item for item in attrs.items()) | [
"def",
"dump_conndata",
"(",
"self",
")",
":",
"attrs",
"=",
"vars",
"(",
"self",
")",
"return",
"', '",
".",
"join",
"(",
"\"%s: %s\"",
"%",
"item",
"for",
"item",
"in",
"attrs",
".",
"items",
"(",
")",
")"
] | Developer tool for debugging forensics. | [
"Developer",
"tool",
"for",
"debugging",
"forensics",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/__init__.py#L220-L223 | train | 39,436 |
nugget/python-insteonplm | insteonplm/states/x10.py | X10OnOffSwitch.on | def on(self):
"""Send the On command to an X10 device."""
msg = X10Send.unit_code_msg(self.address.x10_housecode,
self.address.x10_unitcode)
self._send_method(msg)
msg = X10Send.command_msg(self.address.x10_housecode,
X10_COMMAND_ON)
self._send_method(msg, False)
self._update_subscribers(0xff) | python | def on(self):
"""Send the On command to an X10 device."""
msg = X10Send.unit_code_msg(self.address.x10_housecode,
self.address.x10_unitcode)
self._send_method(msg)
msg = X10Send.command_msg(self.address.x10_housecode,
X10_COMMAND_ON)
self._send_method(msg, False)
self._update_subscribers(0xff) | [
"def",
"on",
"(",
"self",
")",
":",
"msg",
"=",
"X10Send",
".",
"unit_code_msg",
"(",
"self",
".",
"address",
".",
"x10_housecode",
",",
"self",
".",
"address",
".",
"x10_unitcode",
")",
"self",
".",
"_send_method",
"(",
"msg",
")",
"msg",
"=",
"X10Sen... | Send the On command to an X10 device. | [
"Send",
"the",
"On",
"command",
"to",
"an",
"X10",
"device",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/x10.py#L30-L39 | train | 39,437 |
nugget/python-insteonplm | insteonplm/states/x10.py | X10OnOffSwitch.off | def off(self):
"""Send the Off command to an X10 device."""
msg = X10Send.unit_code_msg(self.address.x10_housecode,
self.address.x10_unitcode)
self._send_method(msg)
msg = X10Send.command_msg(self.address.x10_housecode,
X10_COMMAND_OFF)
self._send_method(msg, False)
self._update_subscribers(0x00) | python | def off(self):
"""Send the Off command to an X10 device."""
msg = X10Send.unit_code_msg(self.address.x10_housecode,
self.address.x10_unitcode)
self._send_method(msg)
msg = X10Send.command_msg(self.address.x10_housecode,
X10_COMMAND_OFF)
self._send_method(msg, False)
self._update_subscribers(0x00) | [
"def",
"off",
"(",
"self",
")",
":",
"msg",
"=",
"X10Send",
".",
"unit_code_msg",
"(",
"self",
".",
"address",
".",
"x10_housecode",
",",
"self",
".",
"address",
".",
"x10_unitcode",
")",
"self",
".",
"_send_method",
"(",
"msg",
")",
"msg",
"=",
"X10Se... | Send the Off command to an X10 device. | [
"Send",
"the",
"Off",
"command",
"to",
"an",
"X10",
"device",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/x10.py#L41-L50 | train | 39,438 |
nugget/python-insteonplm | insteonplm/states/x10.py | X10DimmableSwitch.set_level | def set_level(self, val):
"""Set the device ON LEVEL."""
if val == 0:
self.off()
elif val == 255:
self.on()
else:
setlevel = 255
if val < 1:
setlevel = val * 255
elif val <= 0xff:
setlevel = val
change = setlevel - self._value
increment = 255 / self._steps
steps = round(abs(change) / increment)
print('Steps: ', steps)
if change > 0:
method = self.brighten
self._value += round(steps * increment)
self._value = min(255, self._value)
else:
method = self.dim
self._value -= round(steps * increment)
self._value = max(0, self._value)
# pylint: disable=unused-variable
for step in range(0, steps):
method(True)
self._update_subscribers(self._value) | python | def set_level(self, val):
"""Set the device ON LEVEL."""
if val == 0:
self.off()
elif val == 255:
self.on()
else:
setlevel = 255
if val < 1:
setlevel = val * 255
elif val <= 0xff:
setlevel = val
change = setlevel - self._value
increment = 255 / self._steps
steps = round(abs(change) / increment)
print('Steps: ', steps)
if change > 0:
method = self.brighten
self._value += round(steps * increment)
self._value = min(255, self._value)
else:
method = self.dim
self._value -= round(steps * increment)
self._value = max(0, self._value)
# pylint: disable=unused-variable
for step in range(0, steps):
method(True)
self._update_subscribers(self._value) | [
"def",
"set_level",
"(",
"self",
",",
"val",
")",
":",
"if",
"val",
"==",
"0",
":",
"self",
".",
"off",
"(",
")",
"elif",
"val",
"==",
"255",
":",
"self",
".",
"on",
"(",
")",
"else",
":",
"setlevel",
"=",
"255",
"if",
"val",
"<",
"1",
":",
... | Set the device ON LEVEL. | [
"Set",
"the",
"device",
"ON",
"LEVEL",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/x10.py#L107-L134 | train | 39,439 |
nugget/python-insteonplm | insteonplm/address.py | Address.matches_pattern | def matches_pattern(self, other):
"""Test Address object matches the pattern of another object."""
matches = False
if hasattr(other, 'addr'):
if self.addr is None or other.addr is None:
matches = True
else:
matches = self.addr == other.addr
return matches | python | def matches_pattern(self, other):
"""Test Address object matches the pattern of another object."""
matches = False
if hasattr(other, 'addr'):
if self.addr is None or other.addr is None:
matches = True
else:
matches = self.addr == other.addr
return matches | [
"def",
"matches_pattern",
"(",
"self",
",",
"other",
")",
":",
"matches",
"=",
"False",
"if",
"hasattr",
"(",
"other",
",",
"'addr'",
")",
":",
"if",
"self",
".",
"addr",
"is",
"None",
"or",
"other",
".",
"addr",
"is",
"None",
":",
"matches",
"=",
... | Test Address object matches the pattern of another object. | [
"Test",
"Address",
"object",
"matches",
"the",
"pattern",
"of",
"another",
"object",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L57-L65 | train | 39,440 |
nugget/python-insteonplm | insteonplm/address.py | Address._normalize | def _normalize(self, addr):
"""Take any format of address and turn it into a hex string."""
normalize = None
if isinstance(addr, Address):
normalize = addr.addr
self._is_x10 = addr.is_x10
elif isinstance(addr, bytearray):
normalize = binascii.unhexlify(binascii.hexlify(addr).decode())
elif isinstance(addr, bytes):
normalize = addr
elif isinstance(addr, str):
addr = addr.replace('.', '')
addr = addr[0:6]
if addr[0:3].lower() == 'x10':
x10_addr = Address.x10(addr[3:4], int(addr[4:6]))
normalize = x10_addr.addr
self._is_x10 = True
else:
normalize = binascii.unhexlify(addr.lower())
elif addr is None:
normalize = None
else:
_LOGGER.warning('Address class init with unknown type %s: %r',
type(addr), addr)
return normalize | python | def _normalize(self, addr):
"""Take any format of address and turn it into a hex string."""
normalize = None
if isinstance(addr, Address):
normalize = addr.addr
self._is_x10 = addr.is_x10
elif isinstance(addr, bytearray):
normalize = binascii.unhexlify(binascii.hexlify(addr).decode())
elif isinstance(addr, bytes):
normalize = addr
elif isinstance(addr, str):
addr = addr.replace('.', '')
addr = addr[0:6]
if addr[0:3].lower() == 'x10':
x10_addr = Address.x10(addr[3:4], int(addr[4:6]))
normalize = x10_addr.addr
self._is_x10 = True
else:
normalize = binascii.unhexlify(addr.lower())
elif addr is None:
normalize = None
else:
_LOGGER.warning('Address class init with unknown type %s: %r',
type(addr), addr)
return normalize | [
"def",
"_normalize",
"(",
"self",
",",
"addr",
")",
":",
"normalize",
"=",
"None",
"if",
"isinstance",
"(",
"addr",
",",
"Address",
")",
":",
"normalize",
"=",
"addr",
".",
"addr",
"self",
".",
"_is_x10",
"=",
"addr",
".",
"is_x10",
"elif",
"isinstance... | Take any format of address and turn it into a hex string. | [
"Take",
"any",
"format",
"of",
"address",
"and",
"turn",
"it",
"into",
"a",
"hex",
"string",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L67-L96 | train | 39,441 |
nugget/python-insteonplm | insteonplm/address.py | Address.id | def id(self):
"""Return the ID of the device address."""
dev_id = ''
if self._is_x10:
dev_id = 'x10{}{:02d}'.format(self.x10_housecode,
self.x10_unitcode)
else:
dev_id = self.hex
return dev_id | python | def id(self):
"""Return the ID of the device address."""
dev_id = ''
if self._is_x10:
dev_id = 'x10{}{:02d}'.format(self.x10_housecode,
self.x10_unitcode)
else:
dev_id = self.hex
return dev_id | [
"def",
"id",
"(",
"self",
")",
":",
"dev_id",
"=",
"''",
"if",
"self",
".",
"_is_x10",
":",
"dev_id",
"=",
"'x10{}{:02d}'",
".",
"format",
"(",
"self",
".",
"x10_housecode",
",",
"self",
".",
"x10_unitcode",
")",
"else",
":",
"dev_id",
"=",
"self",
"... | Return the ID of the device address. | [
"Return",
"the",
"ID",
"of",
"the",
"device",
"address",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L132-L140 | train | 39,442 |
nugget/python-insteonplm | insteonplm/address.py | Address.x10_housecode | def x10_housecode(self):
"""Emit the X10 house code."""
housecode = None
if self.is_x10:
housecode = insteonplm.utils.byte_to_housecode(self.addr[1])
return housecode | python | def x10_housecode(self):
"""Emit the X10 house code."""
housecode = None
if self.is_x10:
housecode = insteonplm.utils.byte_to_housecode(self.addr[1])
return housecode | [
"def",
"x10_housecode",
"(",
"self",
")",
":",
"housecode",
"=",
"None",
"if",
"self",
".",
"is_x10",
":",
"housecode",
"=",
"insteonplm",
".",
"utils",
".",
"byte_to_housecode",
"(",
"self",
".",
"addr",
"[",
"1",
"]",
")",
"return",
"housecode"
] | Emit the X10 house code. | [
"Emit",
"the",
"X10",
"house",
"code",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L169-L174 | train | 39,443 |
nugget/python-insteonplm | insteonplm/address.py | Address.x10_unitcode | def x10_unitcode(self):
"""Emit the X10 unit code."""
unitcode = None
if self.is_x10:
unitcode = insteonplm.utils.byte_to_unitcode(self.addr[2])
return unitcode | python | def x10_unitcode(self):
"""Emit the X10 unit code."""
unitcode = None
if self.is_x10:
unitcode = insteonplm.utils.byte_to_unitcode(self.addr[2])
return unitcode | [
"def",
"x10_unitcode",
"(",
"self",
")",
":",
"unitcode",
"=",
"None",
"if",
"self",
".",
"is_x10",
":",
"unitcode",
"=",
"insteonplm",
".",
"utils",
".",
"byte_to_unitcode",
"(",
"self",
".",
"addr",
"[",
"2",
"]",
")",
"return",
"unitcode"
] | Emit the X10 unit code. | [
"Emit",
"the",
"X10",
"unit",
"code",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L177-L182 | train | 39,444 |
nugget/python-insteonplm | insteonplm/address.py | Address.x10 | def x10(cls, housecode, unitcode):
"""Create an X10 device address."""
if housecode.lower() in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']:
byte_housecode = insteonplm.utils.housecode_to_byte(housecode)
else:
if isinstance(housecode, str):
_LOGGER.error('X10 house code error: %s', housecode)
else:
_LOGGER.error('X10 house code is not a string')
raise ValueError
# 20, 21 and 22 for All Units Off, All Lights On and All Lights Off
# 'fake' units
if unitcode in range(1, 17) or unitcode in range(20, 23):
byte_unitcode = insteonplm.utils.unitcode_to_byte(unitcode)
else:
if isinstance(unitcode, int):
_LOGGER.error('X10 unit code error: %d', unitcode)
else:
_LOGGER.error('X10 unit code is not an integer 1 - 16')
raise ValueError
addr = Address(bytearray([0x00, byte_housecode, byte_unitcode]))
addr.is_x10 = True
return addr | python | def x10(cls, housecode, unitcode):
"""Create an X10 device address."""
if housecode.lower() in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']:
byte_housecode = insteonplm.utils.housecode_to_byte(housecode)
else:
if isinstance(housecode, str):
_LOGGER.error('X10 house code error: %s', housecode)
else:
_LOGGER.error('X10 house code is not a string')
raise ValueError
# 20, 21 and 22 for All Units Off, All Lights On and All Lights Off
# 'fake' units
if unitcode in range(1, 17) or unitcode in range(20, 23):
byte_unitcode = insteonplm.utils.unitcode_to_byte(unitcode)
else:
if isinstance(unitcode, int):
_LOGGER.error('X10 unit code error: %d', unitcode)
else:
_LOGGER.error('X10 unit code is not an integer 1 - 16')
raise ValueError
addr = Address(bytearray([0x00, byte_housecode, byte_unitcode]))
addr.is_x10 = True
return addr | [
"def",
"x10",
"(",
"cls",
",",
"housecode",
",",
"unitcode",
")",
":",
"if",
"housecode",
".",
"lower",
"(",
")",
"in",
"[",
"'a'",
",",
"'b'",
",",
"'c'",
",",
"'d'",
",",
"'e'",
",",
"'f'",
",",
"'g'",
",",
"'h'",
",",
"'i'",
",",
"'j'",
",... | Create an X10 device address. | [
"Create",
"an",
"X10",
"device",
"address",
"."
] | 65548041f1b0729ae1ae904443dd81b0c6cbf1bf | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L185-L210 | train | 39,445 |
notanumber/xapian-haystack | xapian_backend.py | _term_to_xapian_value | def _term_to_xapian_value(term, field_type):
"""
Converts a term to a serialized
Xapian value based on the field_type.
"""
assert field_type in FIELD_TYPES
def strf(dt):
"""
Equivalent to datetime.datetime.strptime(dt, DATETIME_FORMAT)
but accepts years below 1900 (see http://stackoverflow.com/q/10263956/931303)
"""
return '%04d%02d%02d%02d%02d%02d' % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
if field_type == 'boolean':
assert isinstance(term, bool)
if term:
value = 't'
else:
value = 'f'
elif field_type == 'integer':
value = INTEGER_FORMAT % term
elif field_type == 'float':
value = xapian.sortable_serialise(term)
elif field_type == 'date' or field_type == 'datetime':
if field_type == 'date':
# http://stackoverflow.com/a/1937636/931303 and comments
term = datetime.datetime.combine(term, datetime.time())
value = strf(term)
else: # field_type == 'text'
value = _to_xapian_term(term)
return value | python | def _term_to_xapian_value(term, field_type):
"""
Converts a term to a serialized
Xapian value based on the field_type.
"""
assert field_type in FIELD_TYPES
def strf(dt):
"""
Equivalent to datetime.datetime.strptime(dt, DATETIME_FORMAT)
but accepts years below 1900 (see http://stackoverflow.com/q/10263956/931303)
"""
return '%04d%02d%02d%02d%02d%02d' % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
if field_type == 'boolean':
assert isinstance(term, bool)
if term:
value = 't'
else:
value = 'f'
elif field_type == 'integer':
value = INTEGER_FORMAT % term
elif field_type == 'float':
value = xapian.sortable_serialise(term)
elif field_type == 'date' or field_type == 'datetime':
if field_type == 'date':
# http://stackoverflow.com/a/1937636/931303 and comments
term = datetime.datetime.combine(term, datetime.time())
value = strf(term)
else: # field_type == 'text'
value = _to_xapian_term(term)
return value | [
"def",
"_term_to_xapian_value",
"(",
"term",
",",
"field_type",
")",
":",
"assert",
"field_type",
"in",
"FIELD_TYPES",
"def",
"strf",
"(",
"dt",
")",
":",
"\"\"\"\n Equivalent to datetime.datetime.strptime(dt, DATETIME_FORMAT)\n but accepts years below 1900 (see htt... | Converts a term to a serialized
Xapian value based on the field_type. | [
"Converts",
"a",
"term",
"to",
"a",
"serialized",
"Xapian",
"value",
"based",
"on",
"the",
"field_type",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1588-L1622 | train | 39,446 |
notanumber/xapian-haystack | xapian_backend.py | _from_xapian_value | def _from_xapian_value(value, field_type):
"""
Converts a serialized Xapian value
to Python equivalent based on the field_type.
Doesn't accept multivalued fields.
"""
assert field_type in FIELD_TYPES
if field_type == 'boolean':
if value == 't':
return True
elif value == 'f':
return False
else:
InvalidIndexError('Field type "%d" does not accept value "%s"' % (field_type, value))
elif field_type == 'integer':
return int(value)
elif field_type == 'float':
return xapian.sortable_unserialise(value)
elif field_type == 'date' or field_type == 'datetime':
datetime_value = datetime.datetime.strptime(value, DATETIME_FORMAT)
if field_type == 'datetime':
return datetime_value
else:
return datetime_value.date()
else: # field_type == 'text'
return value | python | def _from_xapian_value(value, field_type):
"""
Converts a serialized Xapian value
to Python equivalent based on the field_type.
Doesn't accept multivalued fields.
"""
assert field_type in FIELD_TYPES
if field_type == 'boolean':
if value == 't':
return True
elif value == 'f':
return False
else:
InvalidIndexError('Field type "%d" does not accept value "%s"' % (field_type, value))
elif field_type == 'integer':
return int(value)
elif field_type == 'float':
return xapian.sortable_unserialise(value)
elif field_type == 'date' or field_type == 'datetime':
datetime_value = datetime.datetime.strptime(value, DATETIME_FORMAT)
if field_type == 'datetime':
return datetime_value
else:
return datetime_value.date()
else: # field_type == 'text'
return value | [
"def",
"_from_xapian_value",
"(",
"value",
",",
"field_type",
")",
":",
"assert",
"field_type",
"in",
"FIELD_TYPES",
"if",
"field_type",
"==",
"'boolean'",
":",
"if",
"value",
"==",
"'t'",
":",
"return",
"True",
"elif",
"value",
"==",
"'f'",
":",
"return",
... | Converts a serialized Xapian value
to Python equivalent based on the field_type.
Doesn't accept multivalued fields. | [
"Converts",
"a",
"serialized",
"Xapian",
"value",
"to",
"Python",
"equivalent",
"based",
"on",
"the",
"field_type",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1633-L1659 | train | 39,447 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend.remove | def remove(self, obj, commit=True):
"""
Remove indexes for `obj` from the database.
We delete all instances of `Q<app_name>.<model_name>.<pk>` which
should be unique to this object.
Optional arguments:
`commit` -- ignored
"""
database = self._database(writable=True)
database.delete_document(TERM_PREFIXES[ID] + get_identifier(obj))
database.close() | python | def remove(self, obj, commit=True):
"""
Remove indexes for `obj` from the database.
We delete all instances of `Q<app_name>.<model_name>.<pk>` which
should be unique to this object.
Optional arguments:
`commit` -- ignored
"""
database = self._database(writable=True)
database.delete_document(TERM_PREFIXES[ID] + get_identifier(obj))
database.close() | [
"def",
"remove",
"(",
"self",
",",
"obj",
",",
"commit",
"=",
"True",
")",
":",
"database",
"=",
"self",
".",
"_database",
"(",
"writable",
"=",
"True",
")",
"database",
".",
"delete_document",
"(",
"TERM_PREFIXES",
"[",
"ID",
"]",
"+",
"get_identifier",... | Remove indexes for `obj` from the database.
We delete all instances of `Q<app_name>.<model_name>.<pk>` which
should be unique to this object.
Optional arguments:
`commit` -- ignored | [
"Remove",
"indexes",
"for",
"obj",
"from",
"the",
"database",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L505-L517 | train | 39,448 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend.clear | def clear(self, models=(), commit=True):
"""
Clear all instances of `models` from the database or all models, if
not specified.
Optional Arguments:
`models` -- Models to clear from the database (default = [])
If `models` is empty, an empty query is executed which matches all
documents in the database. Afterwards, each match is deleted.
Otherwise, for each model, a `delete_document` call is issued with
the term `XCONTENTTYPE<app_name>.<model_name>`. This will delete
all documents with the specified model type.
"""
if not models:
# Because there does not appear to be a "clear all" method,
# it's much quicker to remove the contents of the `self.path`
# folder than it is to remove each document one at a time.
if os.path.exists(self.path):
shutil.rmtree(self.path)
else:
database = self._database(writable=True)
for model in models:
database.delete_document(TERM_PREFIXES[DJANGO_CT] + get_model_ct(model))
database.close() | python | def clear(self, models=(), commit=True):
"""
Clear all instances of `models` from the database or all models, if
not specified.
Optional Arguments:
`models` -- Models to clear from the database (default = [])
If `models` is empty, an empty query is executed which matches all
documents in the database. Afterwards, each match is deleted.
Otherwise, for each model, a `delete_document` call is issued with
the term `XCONTENTTYPE<app_name>.<model_name>`. This will delete
all documents with the specified model type.
"""
if not models:
# Because there does not appear to be a "clear all" method,
# it's much quicker to remove the contents of the `self.path`
# folder than it is to remove each document one at a time.
if os.path.exists(self.path):
shutil.rmtree(self.path)
else:
database = self._database(writable=True)
for model in models:
database.delete_document(TERM_PREFIXES[DJANGO_CT] + get_model_ct(model))
database.close() | [
"def",
"clear",
"(",
"self",
",",
"models",
"=",
"(",
")",
",",
"commit",
"=",
"True",
")",
":",
"if",
"not",
"models",
":",
"# Because there does not appear to be a \"clear all\" method,",
"# it's much quicker to remove the contents of the `self.path`",
"# folder than it i... | Clear all instances of `models` from the database or all models, if
not specified.
Optional Arguments:
`models` -- Models to clear from the database (default = [])
If `models` is empty, an empty query is executed which matches all
documents in the database. Afterwards, each match is deleted.
Otherwise, for each model, a `delete_document` call is issued with
the term `XCONTENTTYPE<app_name>.<model_name>`. This will delete
all documents with the specified model type. | [
"Clear",
"all",
"instances",
"of",
"models",
"from",
"the",
"database",
"or",
"all",
"models",
"if",
"not",
"specified",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L519-L544 | train | 39,449 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._build_models_query | def _build_models_query(self, query):
"""
Builds a query from `query` that filters to documents only from registered models.
"""
registered_models_ct = self.build_models_list()
if registered_models_ct:
restrictions = [xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], model_ct))
for model_ct in registered_models_ct]
limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)
query = xapian.Query(xapian.Query.OP_AND, query, limit_query)
return query | python | def _build_models_query(self, query):
"""
Builds a query from `query` that filters to documents only from registered models.
"""
registered_models_ct = self.build_models_list()
if registered_models_ct:
restrictions = [xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], model_ct))
for model_ct in registered_models_ct]
limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)
query = xapian.Query(xapian.Query.OP_AND, query, limit_query)
return query | [
"def",
"_build_models_query",
"(",
"self",
",",
"query",
")",
":",
"registered_models_ct",
"=",
"self",
".",
"build_models_list",
"(",
")",
"if",
"registered_models_ct",
":",
"restrictions",
"=",
"[",
"xapian",
".",
"Query",
"(",
"'%s%s'",
"%",
"(",
"TERM_PREF... | Builds a query from `query` that filters to documents only from registered models. | [
"Builds",
"a",
"query",
"from",
"query",
"that",
"filters",
"to",
"documents",
"only",
"from",
"registered",
"models",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L552-L564 | train | 39,450 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._check_field_names | def _check_field_names(self, field_names):
"""
Raises InvalidIndexError if any of a field_name in field_names is
not indexed.
"""
if field_names:
for field_name in field_names:
try:
self.column[field_name]
except KeyError:
raise InvalidIndexError('Trying to use non indexed field "%s"' % field_name) | python | def _check_field_names(self, field_names):
"""
Raises InvalidIndexError if any of a field_name in field_names is
not indexed.
"""
if field_names:
for field_name in field_names:
try:
self.column[field_name]
except KeyError:
raise InvalidIndexError('Trying to use non indexed field "%s"' % field_name) | [
"def",
"_check_field_names",
"(",
"self",
",",
"field_names",
")",
":",
"if",
"field_names",
":",
"for",
"field_name",
"in",
"field_names",
":",
"try",
":",
"self",
".",
"column",
"[",
"field_name",
"]",
"except",
"KeyError",
":",
"raise",
"InvalidIndexError",... | Raises InvalidIndexError if any of a field_name in field_names is
not indexed. | [
"Raises",
"InvalidIndexError",
"if",
"any",
"of",
"a",
"field_name",
"in",
"field_names",
"is",
"not",
"indexed",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L566-L576 | train | 39,451 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend.more_like_this | def more_like_this(self, model_instance, additional_query=None,
start_offset=0, end_offset=None,
limit_to_registered_models=True, result_class=None, **kwargs):
"""
Given a model instance, returns a result set of similar documents.
Required arguments:
`model_instance` -- The model instance to use as a basis for
retrieving similar documents.
Optional arguments:
`additional_query` -- An additional query to narrow results
`start_offset` -- The starting offset (default=0)
`end_offset` -- The ending offset (default=None), if None, then all documents
`limit_to_registered_models` -- Limit returned results to models registered in the search (default = True)
Returns:
A dictionary with the following keys:
`results` -- A list of `SearchResult`
`hits` -- The total available results
Opens a database connection, then builds a simple query using the
`model_instance` to build the unique identifier.
For each document retrieved(should always be one), adds an entry into
an RSet (relevance set) with the document id, then, uses the RSet
to query for an ESet (A set of terms that can be used to suggest
expansions to the original query), omitting any document that was in
the original query.
Finally, processes the resulting matches and returns.
"""
database = self._database()
if result_class is None:
result_class = SearchResult
query = xapian.Query(TERM_PREFIXES[ID] + get_identifier(model_instance))
enquire = xapian.Enquire(database)
enquire.set_query(query)
rset = xapian.RSet()
if not end_offset:
end_offset = database.get_doccount()
match = None
for match in self._get_enquire_mset(database, enquire, 0, end_offset):
rset.add_document(match.docid)
if match is None:
if not self.silently_fail:
raise InvalidIndexError('Instance %s with id "%d" not indexed' %
(get_identifier(model_instance), model_instance.id))
else:
return {'results': [],
'hits': 0}
query = xapian.Query(
xapian.Query.OP_ELITE_SET,
[expand.term for expand in enquire.get_eset(match.document.termlist_count(), rset, XHExpandDecider())],
match.document.termlist_count()
)
query = xapian.Query(
xapian.Query.OP_AND_NOT, [query, TERM_PREFIXES[ID] + get_identifier(model_instance)]
)
if limit_to_registered_models:
query = self._build_models_query(query)
if additional_query:
query = xapian.Query(
xapian.Query.OP_AND, query, additional_query
)
enquire.set_query(query)
results = []
matches = self._get_enquire_mset(database, enquire, start_offset, end_offset)
for match in matches:
app_label, model_name, pk, model_data = pickle.loads(self._get_document_data(database, match.document))
results.append(
result_class(app_label, model_name, pk, match.percent, **model_data)
)
return {
'results': results,
'hits': self._get_hit_count(database, enquire),
'facets': {
'fields': {},
'dates': {},
'queries': {},
},
'spelling_suggestion': None,
} | python | def more_like_this(self, model_instance, additional_query=None,
start_offset=0, end_offset=None,
limit_to_registered_models=True, result_class=None, **kwargs):
"""
Given a model instance, returns a result set of similar documents.
Required arguments:
`model_instance` -- The model instance to use as a basis for
retrieving similar documents.
Optional arguments:
`additional_query` -- An additional query to narrow results
`start_offset` -- The starting offset (default=0)
`end_offset` -- The ending offset (default=None), if None, then all documents
`limit_to_registered_models` -- Limit returned results to models registered in the search (default = True)
Returns:
A dictionary with the following keys:
`results` -- A list of `SearchResult`
`hits` -- The total available results
Opens a database connection, then builds a simple query using the
`model_instance` to build the unique identifier.
For each document retrieved(should always be one), adds an entry into
an RSet (relevance set) with the document id, then, uses the RSet
to query for an ESet (A set of terms that can be used to suggest
expansions to the original query), omitting any document that was in
the original query.
Finally, processes the resulting matches and returns.
"""
database = self._database()
if result_class is None:
result_class = SearchResult
query = xapian.Query(TERM_PREFIXES[ID] + get_identifier(model_instance))
enquire = xapian.Enquire(database)
enquire.set_query(query)
rset = xapian.RSet()
if not end_offset:
end_offset = database.get_doccount()
match = None
for match in self._get_enquire_mset(database, enquire, 0, end_offset):
rset.add_document(match.docid)
if match is None:
if not self.silently_fail:
raise InvalidIndexError('Instance %s with id "%d" not indexed' %
(get_identifier(model_instance), model_instance.id))
else:
return {'results': [],
'hits': 0}
query = xapian.Query(
xapian.Query.OP_ELITE_SET,
[expand.term for expand in enquire.get_eset(match.document.termlist_count(), rset, XHExpandDecider())],
match.document.termlist_count()
)
query = xapian.Query(
xapian.Query.OP_AND_NOT, [query, TERM_PREFIXES[ID] + get_identifier(model_instance)]
)
if limit_to_registered_models:
query = self._build_models_query(query)
if additional_query:
query = xapian.Query(
xapian.Query.OP_AND, query, additional_query
)
enquire.set_query(query)
results = []
matches = self._get_enquire_mset(database, enquire, start_offset, end_offset)
for match in matches:
app_label, model_name, pk, model_data = pickle.loads(self._get_document_data(database, match.document))
results.append(
result_class(app_label, model_name, pk, match.percent, **model_data)
)
return {
'results': results,
'hits': self._get_hit_count(database, enquire),
'facets': {
'fields': {},
'dates': {},
'queries': {},
},
'spelling_suggestion': None,
} | [
"def",
"more_like_this",
"(",
"self",
",",
"model_instance",
",",
"additional_query",
"=",
"None",
",",
"start_offset",
"=",
"0",
",",
"end_offset",
"=",
"None",
",",
"limit_to_registered_models",
"=",
"True",
",",
"result_class",
"=",
"None",
",",
"*",
"*",
... | Given a model instance, returns a result set of similar documents.
Required arguments:
`model_instance` -- The model instance to use as a basis for
retrieving similar documents.
Optional arguments:
`additional_query` -- An additional query to narrow results
`start_offset` -- The starting offset (default=0)
`end_offset` -- The ending offset (default=None), if None, then all documents
`limit_to_registered_models` -- Limit returned results to models registered in the search (default = True)
Returns:
A dictionary with the following keys:
`results` -- A list of `SearchResult`
`hits` -- The total available results
Opens a database connection, then builds a simple query using the
`model_instance` to build the unique identifier.
For each document retrieved(should always be one), adds an entry into
an RSet (relevance set) with the document id, then, uses the RSet
to query for an ESet (A set of terms that can be used to suggest
expansions to the original query), omitting any document that was in
the original query.
Finally, processes the resulting matches and returns. | [
"Given",
"a",
"model",
"instance",
"returns",
"a",
"result",
"set",
"of",
"similar",
"documents",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L719-L815 | train | 39,452 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend.parse_query | def parse_query(self, query_string):
"""
Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query
"""
if query_string == '*':
return xapian.Query('') # Match everything
elif query_string == '':
return xapian.Query() # Match nothing
qp = xapian.QueryParser()
qp.set_database(self._database())
qp.set_stemmer(xapian.Stem(self.language))
qp.set_stemming_strategy(self.stemming_strategy)
qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR])
qp.add_boolean_prefix(DJANGO_CT, TERM_PREFIXES[DJANGO_CT])
for field_dict in self.schema:
# since 'django_ct' has a boolean_prefix,
# we ignore it here.
if field_dict['field_name'] == DJANGO_CT:
continue
qp.add_prefix(
field_dict['field_name'],
TERM_PREFIXES['field'] + field_dict['field_name'].upper()
)
vrp = XHValueRangeProcessor(self)
qp.add_valuerangeprocessor(vrp)
return qp.parse_query(query_string, self.flags) | python | def parse_query(self, query_string):
"""
Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query
"""
if query_string == '*':
return xapian.Query('') # Match everything
elif query_string == '':
return xapian.Query() # Match nothing
qp = xapian.QueryParser()
qp.set_database(self._database())
qp.set_stemmer(xapian.Stem(self.language))
qp.set_stemming_strategy(self.stemming_strategy)
qp.set_default_op(XAPIAN_OPTS[DEFAULT_OPERATOR])
qp.add_boolean_prefix(DJANGO_CT, TERM_PREFIXES[DJANGO_CT])
for field_dict in self.schema:
# since 'django_ct' has a boolean_prefix,
# we ignore it here.
if field_dict['field_name'] == DJANGO_CT:
continue
qp.add_prefix(
field_dict['field_name'],
TERM_PREFIXES['field'] + field_dict['field_name'].upper()
)
vrp = XHValueRangeProcessor(self)
qp.add_valuerangeprocessor(vrp)
return qp.parse_query(query_string, self.flags) | [
"def",
"parse_query",
"(",
"self",
",",
"query_string",
")",
":",
"if",
"query_string",
"==",
"'*'",
":",
"return",
"xapian",
".",
"Query",
"(",
"''",
")",
"# Match everything",
"elif",
"query_string",
"==",
"''",
":",
"return",
"xapian",
".",
"Query",
"("... | Given a `query_string`, will attempt to return a xapian.Query
Required arguments:
``query_string`` -- A query string to parse
Returns a xapian.Query | [
"Given",
"a",
"query_string",
"will",
"attempt",
"to",
"return",
"a",
"xapian",
".",
"Query"
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L817-L852 | train | 39,453 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend.build_schema | def build_schema(self, fields):
"""
Build the schema from fields.
:param fields: A list of fields in the index
:returns: list of dictionaries
Each dictionary has the keys
field_name: The name of the field index
type: what type of value it is
'multi_valued': if it allows more than one value
'column': a number identifying it
'type': the type of the field
'multi_valued': 'false', 'column': 0}
"""
content_field_name = ''
schema_fields = [
{'field_name': ID,
'type': 'text',
'multi_valued': 'false',
'column': 0},
{'field_name': DJANGO_ID,
'type': 'integer',
'multi_valued': 'false',
'column': 1},
{'field_name': DJANGO_CT,
'type': 'text',
'multi_valued': 'false',
'column': 2},
]
self._columns[ID] = 0
self._columns[DJANGO_ID] = 1
self._columns[DJANGO_CT] = 2
column = len(schema_fields)
for field_name, field_class in sorted(list(fields.items()), key=lambda n: n[0]):
if field_class.document is True:
content_field_name = field_class.index_fieldname
if field_class.indexed is True:
field_data = {
'field_name': field_class.index_fieldname,
'type': 'text',
'multi_valued': 'false',
'column': column,
}
if field_class.field_type == 'date':
field_data['type'] = 'date'
elif field_class.field_type == 'datetime':
field_data['type'] = 'datetime'
elif field_class.field_type == 'integer':
field_data['type'] = 'integer'
elif field_class.field_type == 'float':
field_data['type'] = 'float'
elif field_class.field_type == 'boolean':
field_data['type'] = 'boolean'
elif field_class.field_type == 'ngram':
field_data['type'] = 'ngram'
elif field_class.field_type == 'edge_ngram':
field_data['type'] = 'edge_ngram'
if field_class.is_multivalued:
field_data['multi_valued'] = 'true'
schema_fields.append(field_data)
self._columns[field_data['field_name']] = column
column += 1
return content_field_name, schema_fields | python | def build_schema(self, fields):
"""
Build the schema from fields.
:param fields: A list of fields in the index
:returns: list of dictionaries
Each dictionary has the keys
field_name: The name of the field index
type: what type of value it is
'multi_valued': if it allows more than one value
'column': a number identifying it
'type': the type of the field
'multi_valued': 'false', 'column': 0}
"""
content_field_name = ''
schema_fields = [
{'field_name': ID,
'type': 'text',
'multi_valued': 'false',
'column': 0},
{'field_name': DJANGO_ID,
'type': 'integer',
'multi_valued': 'false',
'column': 1},
{'field_name': DJANGO_CT,
'type': 'text',
'multi_valued': 'false',
'column': 2},
]
self._columns[ID] = 0
self._columns[DJANGO_ID] = 1
self._columns[DJANGO_CT] = 2
column = len(schema_fields)
for field_name, field_class in sorted(list(fields.items()), key=lambda n: n[0]):
if field_class.document is True:
content_field_name = field_class.index_fieldname
if field_class.indexed is True:
field_data = {
'field_name': field_class.index_fieldname,
'type': 'text',
'multi_valued': 'false',
'column': column,
}
if field_class.field_type == 'date':
field_data['type'] = 'date'
elif field_class.field_type == 'datetime':
field_data['type'] = 'datetime'
elif field_class.field_type == 'integer':
field_data['type'] = 'integer'
elif field_class.field_type == 'float':
field_data['type'] = 'float'
elif field_class.field_type == 'boolean':
field_data['type'] = 'boolean'
elif field_class.field_type == 'ngram':
field_data['type'] = 'ngram'
elif field_class.field_type == 'edge_ngram':
field_data['type'] = 'edge_ngram'
if field_class.is_multivalued:
field_data['multi_valued'] = 'true'
schema_fields.append(field_data)
self._columns[field_data['field_name']] = column
column += 1
return content_field_name, schema_fields | [
"def",
"build_schema",
"(",
"self",
",",
"fields",
")",
":",
"content_field_name",
"=",
"''",
"schema_fields",
"=",
"[",
"{",
"'field_name'",
":",
"ID",
",",
"'type'",
":",
"'text'",
",",
"'multi_valued'",
":",
"'false'",
",",
"'column'",
":",
"0",
"}",
... | Build the schema from fields.
:param fields: A list of fields in the index
:returns: list of dictionaries
Each dictionary has the keys
field_name: The name of the field index
type: what type of value it is
'multi_valued': if it allows more than one value
'column': a number identifying it
'type': the type of the field
'multi_valued': 'false', 'column': 0} | [
"Build",
"the",
"schema",
"from",
"fields",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L854-L924 | train | 39,454 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._do_highlight | def _do_highlight(content, query, tag='em'):
"""
Highlight `query` terms in `content` with html `tag`.
This method assumes that the input text (`content`) does not contain
any special formatting. That is, it does not contain any html tags
or similar markup that could be screwed up by the highlighting.
Required arguments:
`content` -- Content to search for instances of `text`
`text` -- The text to be highlighted
"""
for term in query:
term = term.decode('utf-8')
for match in re.findall('[^A-Z]+', term): # Ignore field identifiers
match_re = re.compile(match, re.I)
content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content)
return content | python | def _do_highlight(content, query, tag='em'):
"""
Highlight `query` terms in `content` with html `tag`.
This method assumes that the input text (`content`) does not contain
any special formatting. That is, it does not contain any html tags
or similar markup that could be screwed up by the highlighting.
Required arguments:
`content` -- Content to search for instances of `text`
`text` -- The text to be highlighted
"""
for term in query:
term = term.decode('utf-8')
for match in re.findall('[^A-Z]+', term): # Ignore field identifiers
match_re = re.compile(match, re.I)
content = match_re.sub('<%s>%s</%s>' % (tag, term, tag), content)
return content | [
"def",
"_do_highlight",
"(",
"content",
",",
"query",
",",
"tag",
"=",
"'em'",
")",
":",
"for",
"term",
"in",
"query",
":",
"term",
"=",
"term",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"match",
"in",
"re",
".",
"findall",
"(",
"'[^A-Z]+'",
",",
... | Highlight `query` terms in `content` with html `tag`.
This method assumes that the input text (`content`) does not contain
any special formatting. That is, it does not contain any html tags
or similar markup that could be screwed up by the highlighting.
Required arguments:
`content` -- Content to search for instances of `text`
`text` -- The text to be highlighted | [
"Highlight",
"query",
"terms",
"in",
"content",
"with",
"html",
"tag",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L927-L945 | train | 39,455 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._prepare_facet_field_spies | def _prepare_facet_field_spies(self, facets):
"""
Returns a list of spies based on the facets
used to count frequencies.
"""
spies = []
for facet in facets:
slot = self.column[facet]
spy = xapian.ValueCountMatchSpy(slot)
# add attribute "slot" to know which column this spy is targeting.
spy.slot = slot
spies.append(spy)
return spies | python | def _prepare_facet_field_spies(self, facets):
"""
Returns a list of spies based on the facets
used to count frequencies.
"""
spies = []
for facet in facets:
slot = self.column[facet]
spy = xapian.ValueCountMatchSpy(slot)
# add attribute "slot" to know which column this spy is targeting.
spy.slot = slot
spies.append(spy)
return spies | [
"def",
"_prepare_facet_field_spies",
"(",
"self",
",",
"facets",
")",
":",
"spies",
"=",
"[",
"]",
"for",
"facet",
"in",
"facets",
":",
"slot",
"=",
"self",
".",
"column",
"[",
"facet",
"]",
"spy",
"=",
"xapian",
".",
"ValueCountMatchSpy",
"(",
"slot",
... | Returns a list of spies based on the facets
used to count frequencies. | [
"Returns",
"a",
"list",
"of",
"spies",
"based",
"on",
"the",
"facets",
"used",
"to",
"count",
"frequencies",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L947-L959 | train | 39,456 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._do_multivalued_field_facets | def _do_multivalued_field_facets(self, results, field_facets):
"""
Implements a multivalued field facet on the results.
This is implemented using brute force - O(N^2) -
because Xapian does not have it implemented yet
(see http://trac.xapian.org/ticket/199)
"""
facet_dict = {}
for field in field_facets:
facet_list = {}
if not self._multi_value_field(field):
continue
for result in results:
field_value = getattr(result, field)
for item in field_value: # Facet each item in a MultiValueField
facet_list[item] = facet_list.get(item, 0) + 1
facet_dict[field] = list(facet_list.items())
return facet_dict | python | def _do_multivalued_field_facets(self, results, field_facets):
"""
Implements a multivalued field facet on the results.
This is implemented using brute force - O(N^2) -
because Xapian does not have it implemented yet
(see http://trac.xapian.org/ticket/199)
"""
facet_dict = {}
for field in field_facets:
facet_list = {}
if not self._multi_value_field(field):
continue
for result in results:
field_value = getattr(result, field)
for item in field_value: # Facet each item in a MultiValueField
facet_list[item] = facet_list.get(item, 0) + 1
facet_dict[field] = list(facet_list.items())
return facet_dict | [
"def",
"_do_multivalued_field_facets",
"(",
"self",
",",
"results",
",",
"field_facets",
")",
":",
"facet_dict",
"=",
"{",
"}",
"for",
"field",
"in",
"field_facets",
":",
"facet_list",
"=",
"{",
"}",
"if",
"not",
"self",
".",
"_multi_value_field",
"(",
"fiel... | Implements a multivalued field facet on the results.
This is implemented using brute force - O(N^2) -
because Xapian does not have it implemented yet
(see http://trac.xapian.org/ticket/199) | [
"Implements",
"a",
"multivalued",
"field",
"facet",
"on",
"the",
"results",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L985-L1006 | train | 39,457 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._do_date_facets | def _do_date_facets(results, date_facets):
"""
Private method that facets a document by date ranges
Required arguments:
`results` -- A list SearchResults to facet
`date_facets` -- A dictionary containing facet parameters:
{'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}}
nb., gap must be one of the following:
year|month|day|hour|minute|second
For each date facet field in `date_facets`, generates a list
of date ranges (from `start_date` to `end_date` by `gap_by`) then
iterates through `results` and tallies the count for each date_facet.
Returns a dictionary of date facets (fields) containing a list with
entries for each range and a count of documents matching the range.
eg. {
'pub_date': [
(datetime.datetime(2009, 1, 1, 0, 0), 5),
(datetime.datetime(2009, 2, 1, 0, 0), 0),
(datetime.datetime(2009, 3, 1, 0, 0), 0),
(datetime.datetime(2008, 4, 1, 0, 0), 1),
(datetime.datetime(2008, 5, 1, 0, 0), 2),
],
}
"""
def next_datetime(previous, gap_value, gap_type):
year = previous.year
month = previous.month
if gap_type == 'year':
next = previous.replace(year=year + gap_value)
elif gap_type == 'month':
if month + gap_value <= 12:
next = previous.replace(month=month + gap_value)
else:
next = previous.replace(
month=((month + gap_value) % 12),
year=(year + (month + gap_value) // 12)
)
elif gap_type == 'day':
next = previous + datetime.timedelta(days=gap_value)
elif gap_type == 'hour':
return previous + datetime.timedelta(hours=gap_value)
elif gap_type == 'minute':
next = previous + datetime.timedelta(minutes=gap_value)
elif gap_type == 'second':
next = previous + datetime.timedelta(seconds=gap_value)
else:
raise TypeError('\'gap_by\' must be '
'{second, minute, day, month, year}')
return next
facet_dict = {}
for date_facet, facet_params in list(date_facets.items()):
gap_type = facet_params.get('gap_by')
gap_value = facet_params.get('gap_amount', 1)
date_range = facet_params['start_date']
# construct the bins of the histogram
facet_list = []
while date_range < facet_params['end_date']:
facet_list.append((date_range, 0))
date_range = next_datetime(date_range, gap_value, gap_type)
facet_list = sorted(facet_list, key=lambda x: x[0], reverse=True)
for result in results:
result_date = getattr(result, date_facet)
# convert date to datetime
if not isinstance(result_date, datetime.datetime):
result_date = datetime.datetime(result_date.year,
result_date.month,
result_date.day)
# ignore results outside the boundaries.
if facet_list[0][0] < result_date < facet_list[-1][0]:
continue
# populate the histogram by putting the result on the right bin.
for n, facet_date in enumerate(facet_list):
if result_date > facet_date[0]:
# equal to facet_list[n][1] += 1, but for a tuple
facet_list[n] = (facet_list[n][0], (facet_list[n][1] + 1))
break # bin found; go to next result
facet_dict[date_facet] = facet_list
return facet_dict | python | def _do_date_facets(results, date_facets):
"""
Private method that facets a document by date ranges
Required arguments:
`results` -- A list SearchResults to facet
`date_facets` -- A dictionary containing facet parameters:
{'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}}
nb., gap must be one of the following:
year|month|day|hour|minute|second
For each date facet field in `date_facets`, generates a list
of date ranges (from `start_date` to `end_date` by `gap_by`) then
iterates through `results` and tallies the count for each date_facet.
Returns a dictionary of date facets (fields) containing a list with
entries for each range and a count of documents matching the range.
eg. {
'pub_date': [
(datetime.datetime(2009, 1, 1, 0, 0), 5),
(datetime.datetime(2009, 2, 1, 0, 0), 0),
(datetime.datetime(2009, 3, 1, 0, 0), 0),
(datetime.datetime(2008, 4, 1, 0, 0), 1),
(datetime.datetime(2008, 5, 1, 0, 0), 2),
],
}
"""
def next_datetime(previous, gap_value, gap_type):
year = previous.year
month = previous.month
if gap_type == 'year':
next = previous.replace(year=year + gap_value)
elif gap_type == 'month':
if month + gap_value <= 12:
next = previous.replace(month=month + gap_value)
else:
next = previous.replace(
month=((month + gap_value) % 12),
year=(year + (month + gap_value) // 12)
)
elif gap_type == 'day':
next = previous + datetime.timedelta(days=gap_value)
elif gap_type == 'hour':
return previous + datetime.timedelta(hours=gap_value)
elif gap_type == 'minute':
next = previous + datetime.timedelta(minutes=gap_value)
elif gap_type == 'second':
next = previous + datetime.timedelta(seconds=gap_value)
else:
raise TypeError('\'gap_by\' must be '
'{second, minute, day, month, year}')
return next
facet_dict = {}
for date_facet, facet_params in list(date_facets.items()):
gap_type = facet_params.get('gap_by')
gap_value = facet_params.get('gap_amount', 1)
date_range = facet_params['start_date']
# construct the bins of the histogram
facet_list = []
while date_range < facet_params['end_date']:
facet_list.append((date_range, 0))
date_range = next_datetime(date_range, gap_value, gap_type)
facet_list = sorted(facet_list, key=lambda x: x[0], reverse=True)
for result in results:
result_date = getattr(result, date_facet)
# convert date to datetime
if not isinstance(result_date, datetime.datetime):
result_date = datetime.datetime(result_date.year,
result_date.month,
result_date.day)
# ignore results outside the boundaries.
if facet_list[0][0] < result_date < facet_list[-1][0]:
continue
# populate the histogram by putting the result on the right bin.
for n, facet_date in enumerate(facet_list):
if result_date > facet_date[0]:
# equal to facet_list[n][1] += 1, but for a tuple
facet_list[n] = (facet_list[n][0], (facet_list[n][1] + 1))
break # bin found; go to next result
facet_dict[date_facet] = facet_list
return facet_dict | [
"def",
"_do_date_facets",
"(",
"results",
",",
"date_facets",
")",
":",
"def",
"next_datetime",
"(",
"previous",
",",
"gap_value",
",",
"gap_type",
")",
":",
"year",
"=",
"previous",
".",
"year",
"month",
"=",
"previous",
".",
"month",
"if",
"gap_type",
"=... | Private method that facets a document by date ranges
Required arguments:
`results` -- A list SearchResults to facet
`date_facets` -- A dictionary containing facet parameters:
{'field': {'start_date': ..., 'end_date': ...: 'gap_by': '...', 'gap_amount': n}}
nb., gap must be one of the following:
year|month|day|hour|minute|second
For each date facet field in `date_facets`, generates a list
of date ranges (from `start_date` to `end_date` by `gap_by`) then
iterates through `results` and tallies the count for each date_facet.
Returns a dictionary of date facets (fields) containing a list with
entries for each range and a count of documents matching the range.
eg. {
'pub_date': [
(datetime.datetime(2009, 1, 1, 0, 0), 5),
(datetime.datetime(2009, 2, 1, 0, 0), 0),
(datetime.datetime(2009, 3, 1, 0, 0), 0),
(datetime.datetime(2008, 4, 1, 0, 0), 1),
(datetime.datetime(2008, 5, 1, 0, 0), 2),
],
} | [
"Private",
"method",
"that",
"facets",
"a",
"document",
"by",
"date",
"ranges"
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1009-L1101 | train | 39,458 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._do_query_facets | def _do_query_facets(self, results, query_facets):
"""
Private method that facets a document by query
Required arguments:
`results` -- A list SearchResults to facet
`query_facets` -- A dictionary containing facet parameters:
{'field': 'query', [...]}
For each query in `query_facets`, generates a dictionary entry with
the field name as the key and a tuple with the query and result count
as the value.
eg. {'name': ('a*', 5)}
"""
facet_dict = {}
for field, query in list(dict(query_facets).items()):
facet_dict[field] = (query, self.search(self.parse_query(query))['hits'])
return facet_dict | python | def _do_query_facets(self, results, query_facets):
"""
Private method that facets a document by query
Required arguments:
`results` -- A list SearchResults to facet
`query_facets` -- A dictionary containing facet parameters:
{'field': 'query', [...]}
For each query in `query_facets`, generates a dictionary entry with
the field name as the key and a tuple with the query and result count
as the value.
eg. {'name': ('a*', 5)}
"""
facet_dict = {}
for field, query in list(dict(query_facets).items()):
facet_dict[field] = (query, self.search(self.parse_query(query))['hits'])
return facet_dict | [
"def",
"_do_query_facets",
"(",
"self",
",",
"results",
",",
"query_facets",
")",
":",
"facet_dict",
"=",
"{",
"}",
"for",
"field",
",",
"query",
"in",
"list",
"(",
"dict",
"(",
"query_facets",
")",
".",
"items",
"(",
")",
")",
":",
"facet_dict",
"[",
... | Private method that facets a document by query
Required arguments:
`results` -- A list SearchResults to facet
`query_facets` -- A dictionary containing facet parameters:
{'field': 'query', [...]}
For each query in `query_facets`, generates a dictionary entry with
the field name as the key and a tuple with the query and result count
as the value.
eg. {'name': ('a*', 5)} | [
"Private",
"method",
"that",
"facets",
"a",
"document",
"by",
"query"
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1103-L1122 | train | 39,459 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._do_spelling_suggestion | def _do_spelling_suggestion(database, query, spelling_query):
"""
Private method that returns a single spelling suggestion based on
`spelling_query` or `query`.
Required arguments:
`database` -- The database to check spelling against
`query` -- The query to check
`spelling_query` -- If not None, this will be checked instead of `query`
Returns a string with a suggested spelling
"""
if spelling_query:
if ' ' in spelling_query:
return ' '.join([database.get_spelling_suggestion(term).decode('utf-8') for term in spelling_query.split()])
else:
return database.get_spelling_suggestion(spelling_query).decode('utf-8')
term_set = set()
for term in query:
for match in re.findall('[^A-Z]+', term.decode('utf-8')): # Ignore field identifiers
term_set.add(database.get_spelling_suggestion(match).decode('utf-8'))
return ' '.join(term_set) | python | def _do_spelling_suggestion(database, query, spelling_query):
"""
Private method that returns a single spelling suggestion based on
`spelling_query` or `query`.
Required arguments:
`database` -- The database to check spelling against
`query` -- The query to check
`spelling_query` -- If not None, this will be checked instead of `query`
Returns a string with a suggested spelling
"""
if spelling_query:
if ' ' in spelling_query:
return ' '.join([database.get_spelling_suggestion(term).decode('utf-8') for term in spelling_query.split()])
else:
return database.get_spelling_suggestion(spelling_query).decode('utf-8')
term_set = set()
for term in query:
for match in re.findall('[^A-Z]+', term.decode('utf-8')): # Ignore field identifiers
term_set.add(database.get_spelling_suggestion(match).decode('utf-8'))
return ' '.join(term_set) | [
"def",
"_do_spelling_suggestion",
"(",
"database",
",",
"query",
",",
"spelling_query",
")",
":",
"if",
"spelling_query",
":",
"if",
"' '",
"in",
"spelling_query",
":",
"return",
"' '",
".",
"join",
"(",
"[",
"database",
".",
"get_spelling_suggestion",
"(",
"t... | Private method that returns a single spelling suggestion based on
`spelling_query` or `query`.
Required arguments:
`database` -- The database to check spelling against
`query` -- The query to check
`spelling_query` -- If not None, this will be checked instead of `query`
Returns a string with a suggested spelling | [
"Private",
"method",
"that",
"returns",
"a",
"single",
"spelling",
"suggestion",
"based",
"on",
"spelling_query",
"or",
"query",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1125-L1148 | train | 39,460 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._database | def _database(self, writable=False):
"""
Private method that returns a xapian.Database for use.
Optional arguments:
``writable`` -- Open the database in read/write mode (default=False)
Returns an instance of a xapian.Database or xapian.WritableDatabase
"""
if self.path == MEMORY_DB_NAME:
if not self.inmemory_db:
self.inmemory_db = xapian.inmemory_open()
return self.inmemory_db
if writable:
database = xapian.WritableDatabase(self.path, xapian.DB_CREATE_OR_OPEN)
else:
try:
database = xapian.Database(self.path)
except xapian.DatabaseOpeningError:
raise InvalidIndexError('Unable to open index at %s' % self.path)
return database | python | def _database(self, writable=False):
"""
Private method that returns a xapian.Database for use.
Optional arguments:
``writable`` -- Open the database in read/write mode (default=False)
Returns an instance of a xapian.Database or xapian.WritableDatabase
"""
if self.path == MEMORY_DB_NAME:
if not self.inmemory_db:
self.inmemory_db = xapian.inmemory_open()
return self.inmemory_db
if writable:
database = xapian.WritableDatabase(self.path, xapian.DB_CREATE_OR_OPEN)
else:
try:
database = xapian.Database(self.path)
except xapian.DatabaseOpeningError:
raise InvalidIndexError('Unable to open index at %s' % self.path)
return database | [
"def",
"_database",
"(",
"self",
",",
"writable",
"=",
"False",
")",
":",
"if",
"self",
".",
"path",
"==",
"MEMORY_DB_NAME",
":",
"if",
"not",
"self",
".",
"inmemory_db",
":",
"self",
".",
"inmemory_db",
"=",
"xapian",
".",
"inmemory_open",
"(",
")",
"... | Private method that returns a xapian.Database for use.
Optional arguments:
``writable`` -- Open the database in read/write mode (default=False)
Returns an instance of a xapian.Database or xapian.WritableDatabase | [
"Private",
"method",
"that",
"returns",
"a",
"xapian",
".",
"Database",
"for",
"use",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1150-L1171 | train | 39,461 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._get_document_data | def _get_document_data(database, document):
"""
A safer version of Xapian.document.get_data
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`document` -- An instance of an Xapian.document object
"""
try:
return document.get_data()
except xapian.DatabaseModifiedError:
database.reopen()
return document.get_data() | python | def _get_document_data(database, document):
"""
A safer version of Xapian.document.get_data
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`document` -- An instance of an Xapian.document object
"""
try:
return document.get_data()
except xapian.DatabaseModifiedError:
database.reopen()
return document.get_data() | [
"def",
"_get_document_data",
"(",
"database",
",",
"document",
")",
":",
"try",
":",
"return",
"document",
".",
"get_data",
"(",
")",
"except",
"xapian",
".",
"DatabaseModifiedError",
":",
"database",
".",
"reopen",
"(",
")",
"return",
"document",
".",
"get_... | A safer version of Xapian.document.get_data
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`document` -- An instance of an Xapian.document object | [
"A",
"safer",
"version",
"of",
"Xapian",
".",
"document",
".",
"get_data"
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1194-L1209 | train | 39,462 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._get_hit_count | def _get_hit_count(self, database, enquire):
"""
Given a database and enquire instance, returns the estimated number
of matches.
Required arguments:
`database` -- The database to be queried
`enquire` -- The enquire instance
"""
return self._get_enquire_mset(
database, enquire, 0, database.get_doccount()
).size() | python | def _get_hit_count(self, database, enquire):
"""
Given a database and enquire instance, returns the estimated number
of matches.
Required arguments:
`database` -- The database to be queried
`enquire` -- The enquire instance
"""
return self._get_enquire_mset(
database, enquire, 0, database.get_doccount()
).size() | [
"def",
"_get_hit_count",
"(",
"self",
",",
"database",
",",
"enquire",
")",
":",
"return",
"self",
".",
"_get_enquire_mset",
"(",
"database",
",",
"enquire",
",",
"0",
",",
"database",
".",
"get_doccount",
"(",
")",
")",
".",
"size",
"(",
")"
] | Given a database and enquire instance, returns the estimated number
of matches.
Required arguments:
`database` -- The database to be queried
`enquire` -- The enquire instance | [
"Given",
"a",
"database",
"and",
"enquire",
"instance",
"returns",
"the",
"estimated",
"number",
"of",
"matches",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1211-L1222 | train | 39,463 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchBackend._multi_value_field | def _multi_value_field(self, field):
"""
Private method that returns `True` if a field is multi-valued, else
`False`.
Required arguemnts:
`field` -- The field to lookup
Returns a boolean value indicating whether the field is multi-valued.
"""
for field_dict in self.schema:
if field_dict['field_name'] == field:
return field_dict['multi_valued'] == 'true'
return False | python | def _multi_value_field(self, field):
"""
Private method that returns `True` if a field is multi-valued, else
`False`.
Required arguemnts:
`field` -- The field to lookup
Returns a boolean value indicating whether the field is multi-valued.
"""
for field_dict in self.schema:
if field_dict['field_name'] == field:
return field_dict['multi_valued'] == 'true'
return False | [
"def",
"_multi_value_field",
"(",
"self",
",",
"field",
")",
":",
"for",
"field_dict",
"in",
"self",
".",
"schema",
":",
"if",
"field_dict",
"[",
"'field_name'",
"]",
"==",
"field",
":",
"return",
"field_dict",
"[",
"'multi_valued'",
"]",
"==",
"'true'",
"... | Private method that returns `True` if a field is multi-valued, else
`False`.
Required arguemnts:
`field` -- The field to lookup
Returns a boolean value indicating whether the field is multi-valued. | [
"Private",
"method",
"that",
"returns",
"True",
"if",
"a",
"field",
"is",
"multi",
"-",
"valued",
"else",
"False",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1224-L1237 | train | 39,464 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchQuery._query_from_term | def _query_from_term(self, term, field_name, filter_type, is_not):
"""
Uses arguments to construct a list of xapian.Query's.
"""
if field_name != 'content' and field_name not in self.backend.column:
raise InvalidIndexError('field "%s" not indexed' % field_name)
# It it is an AutoQuery, it has no filters
# or others, thus we short-circuit the procedure.
if isinstance(term, AutoQuery):
if field_name != 'content':
query = '%s:%s' % (field_name, term.prepare(self))
else:
query = term.prepare(self)
return [self.backend.parse_query(query)]
query_list = []
# Handle `ValuesListQuerySet`.
if hasattr(term, 'values_list'):
term = list(term)
if field_name == 'content':
# content is the generic search:
# force no field_name search
# and the field_type to be 'text'.
field_name = None
field_type = 'text'
# we don't know what is the type(term), so we parse it.
# Ideally this would not be required, but
# some filters currently depend on the term to make decisions.
term = _to_xapian_term(term)
query_list.append(self._filter_contains(term, field_name, field_type, is_not))
# when filter has no filter_type, haystack uses
# filter_type = 'content'. Here we remove it
# since the above query is already doing this
if filter_type == 'content':
filter_type = None
else:
# get the field_type from the backend
field_type = self.backend.schema[self.backend.column[field_name]]['type']
# private fields don't accept 'contains' or 'startswith'
# since they have no meaning.
if filter_type in ('contains', 'startswith') and field_name in (ID, DJANGO_ID, DJANGO_CT):
filter_type = 'exact'
if field_type == 'text':
# we don't know what type "term" is, but we know we are searching as text
# so we parse it like that.
# Ideally this would not be required since _term_query does it, but
# some filters currently depend on the term to make decisions.
if isinstance(term, list):
term = [_to_xapian_term(term) for term in term]
else:
term = _to_xapian_term(term)
# todo: we should check that the filter is valid for this field_type or raise InvalidIndexError
if filter_type == 'contains':
query_list.append(self._filter_contains(term, field_name, field_type, is_not))
elif filter_type in ('content', 'exact'):
query_list.append(self._filter_exact(term, field_name, field_type, is_not))
elif filter_type == 'in':
query_list.append(self._filter_in(term, field_name, field_type, is_not))
elif filter_type == 'startswith':
query_list.append(self._filter_startswith(term, field_name, field_type, is_not))
elif filter_type == 'endswith':
raise NotImplementedError("The Xapian search backend doesn't support endswith queries.")
elif filter_type == 'gt':
query_list.append(self._filter_gt(term, field_name, field_type, is_not))
elif filter_type == 'gte':
query_list.append(self._filter_gte(term, field_name, field_type, is_not))
elif filter_type == 'lt':
query_list.append(self._filter_lt(term, field_name, field_type, is_not))
elif filter_type == 'lte':
query_list.append(self._filter_lte(term, field_name, field_type, is_not))
elif filter_type == 'range':
query_list.append(self._filter_range(term, field_name, field_type, is_not))
return query_list | python | def _query_from_term(self, term, field_name, filter_type, is_not):
"""
Uses arguments to construct a list of xapian.Query's.
"""
if field_name != 'content' and field_name not in self.backend.column:
raise InvalidIndexError('field "%s" not indexed' % field_name)
# It it is an AutoQuery, it has no filters
# or others, thus we short-circuit the procedure.
if isinstance(term, AutoQuery):
if field_name != 'content':
query = '%s:%s' % (field_name, term.prepare(self))
else:
query = term.prepare(self)
return [self.backend.parse_query(query)]
query_list = []
# Handle `ValuesListQuerySet`.
if hasattr(term, 'values_list'):
term = list(term)
if field_name == 'content':
# content is the generic search:
# force no field_name search
# and the field_type to be 'text'.
field_name = None
field_type = 'text'
# we don't know what is the type(term), so we parse it.
# Ideally this would not be required, but
# some filters currently depend on the term to make decisions.
term = _to_xapian_term(term)
query_list.append(self._filter_contains(term, field_name, field_type, is_not))
# when filter has no filter_type, haystack uses
# filter_type = 'content'. Here we remove it
# since the above query is already doing this
if filter_type == 'content':
filter_type = None
else:
# get the field_type from the backend
field_type = self.backend.schema[self.backend.column[field_name]]['type']
# private fields don't accept 'contains' or 'startswith'
# since they have no meaning.
if filter_type in ('contains', 'startswith') and field_name in (ID, DJANGO_ID, DJANGO_CT):
filter_type = 'exact'
if field_type == 'text':
# we don't know what type "term" is, but we know we are searching as text
# so we parse it like that.
# Ideally this would not be required since _term_query does it, but
# some filters currently depend on the term to make decisions.
if isinstance(term, list):
term = [_to_xapian_term(term) for term in term]
else:
term = _to_xapian_term(term)
# todo: we should check that the filter is valid for this field_type or raise InvalidIndexError
if filter_type == 'contains':
query_list.append(self._filter_contains(term, field_name, field_type, is_not))
elif filter_type in ('content', 'exact'):
query_list.append(self._filter_exact(term, field_name, field_type, is_not))
elif filter_type == 'in':
query_list.append(self._filter_in(term, field_name, field_type, is_not))
elif filter_type == 'startswith':
query_list.append(self._filter_startswith(term, field_name, field_type, is_not))
elif filter_type == 'endswith':
raise NotImplementedError("The Xapian search backend doesn't support endswith queries.")
elif filter_type == 'gt':
query_list.append(self._filter_gt(term, field_name, field_type, is_not))
elif filter_type == 'gte':
query_list.append(self._filter_gte(term, field_name, field_type, is_not))
elif filter_type == 'lt':
query_list.append(self._filter_lt(term, field_name, field_type, is_not))
elif filter_type == 'lte':
query_list.append(self._filter_lte(term, field_name, field_type, is_not))
elif filter_type == 'range':
query_list.append(self._filter_range(term, field_name, field_type, is_not))
return query_list | [
"def",
"_query_from_term",
"(",
"self",
",",
"term",
",",
"field_name",
",",
"filter_type",
",",
"is_not",
")",
":",
"if",
"field_name",
"!=",
"'content'",
"and",
"field_name",
"not",
"in",
"self",
".",
"backend",
".",
"column",
":",
"raise",
"InvalidIndexEr... | Uses arguments to construct a list of xapian.Query's. | [
"Uses",
"arguments",
"to",
"construct",
"a",
"list",
"of",
"xapian",
".",
"Query",
"s",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1307-L1386 | train | 39,465 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchQuery._filter_contains | def _filter_contains(self, term, field_name, field_type, is_not):
"""
Splits the sentence in terms and join them with OR,
using stemmed and un-stemmed.
Assumes term is not a list.
"""
if field_type == 'text':
term_list = term.split()
else:
term_list = [term]
query = self._or_query(term_list, field_name, field_type)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
else:
return query | python | def _filter_contains(self, term, field_name, field_type, is_not):
"""
Splits the sentence in terms and join them with OR,
using stemmed and un-stemmed.
Assumes term is not a list.
"""
if field_type == 'text':
term_list = term.split()
else:
term_list = [term]
query = self._or_query(term_list, field_name, field_type)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
else:
return query | [
"def",
"_filter_contains",
"(",
"self",
",",
"term",
",",
"field_name",
",",
"field_type",
",",
"is_not",
")",
":",
"if",
"field_type",
"==",
"'text'",
":",
"term_list",
"=",
"term",
".",
"split",
"(",
")",
"else",
":",
"term_list",
"=",
"[",
"term",
"... | Splits the sentence in terms and join them with OR,
using stemmed and un-stemmed.
Assumes term is not a list. | [
"Splits",
"the",
"sentence",
"in",
"terms",
"and",
"join",
"them",
"with",
"OR",
"using",
"stemmed",
"and",
"un",
"-",
"stemmed",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1394-L1410 | train | 39,466 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchQuery._filter_in | def _filter_in(self, term_list, field_name, field_type, is_not):
"""
Returns a query that matches exactly ANY term in term_list.
Notice that:
A in {B,C} <=> (A = B or A = C)
~(A in {B,C}) <=> ~(A = B or A = C)
Because OP_AND_NOT(C, D) <=> (C and ~D), then D=(A in {B,C}) requires `is_not=False`.
Assumes term is a list.
"""
query_list = [self._filter_exact(term, field_name, field_type, is_not=False)
for term in term_list]
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(),
xapian.Query(xapian.Query.OP_OR, query_list))
else:
return xapian.Query(xapian.Query.OP_OR, query_list) | python | def _filter_in(self, term_list, field_name, field_type, is_not):
"""
Returns a query that matches exactly ANY term in term_list.
Notice that:
A in {B,C} <=> (A = B or A = C)
~(A in {B,C}) <=> ~(A = B or A = C)
Because OP_AND_NOT(C, D) <=> (C and ~D), then D=(A in {B,C}) requires `is_not=False`.
Assumes term is a list.
"""
query_list = [self._filter_exact(term, field_name, field_type, is_not=False)
for term in term_list]
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(),
xapian.Query(xapian.Query.OP_OR, query_list))
else:
return xapian.Query(xapian.Query.OP_OR, query_list) | [
"def",
"_filter_in",
"(",
"self",
",",
"term_list",
",",
"field_name",
",",
"field_type",
",",
"is_not",
")",
":",
"query_list",
"=",
"[",
"self",
".",
"_filter_exact",
"(",
"term",
",",
"field_name",
",",
"field_type",
",",
"is_not",
"=",
"False",
")",
... | Returns a query that matches exactly ANY term in term_list.
Notice that:
A in {B,C} <=> (A = B or A = C)
~(A in {B,C}) <=> ~(A = B or A = C)
Because OP_AND_NOT(C, D) <=> (C and ~D), then D=(A in {B,C}) requires `is_not=False`.
Assumes term is a list. | [
"Returns",
"a",
"query",
"that",
"matches",
"exactly",
"ANY",
"term",
"in",
"term_list",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1412-L1430 | train | 39,467 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchQuery._filter_exact | def _filter_exact(self, term, field_name, field_type, is_not):
"""
Returns a query that matches exactly the un-stemmed term
with positional order.
Assumes term is not a list.
"""
if field_type == 'text' and field_name not in (DJANGO_CT,):
term = '^ %s $' % term
query = self._phrase_query(term.split(), field_name, field_type)
else:
query = self._term_query(term, field_name, field_type, stemmed=False)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
else:
return query | python | def _filter_exact(self, term, field_name, field_type, is_not):
"""
Returns a query that matches exactly the un-stemmed term
with positional order.
Assumes term is not a list.
"""
if field_type == 'text' and field_name not in (DJANGO_CT,):
term = '^ %s $' % term
query = self._phrase_query(term.split(), field_name, field_type)
else:
query = self._term_query(term, field_name, field_type, stemmed=False)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
else:
return query | [
"def",
"_filter_exact",
"(",
"self",
",",
"term",
",",
"field_name",
",",
"field_type",
",",
"is_not",
")",
":",
"if",
"field_type",
"==",
"'text'",
"and",
"field_name",
"not",
"in",
"(",
"DJANGO_CT",
",",
")",
":",
"term",
"=",
"'^ %s $'",
"%",
"term",
... | Returns a query that matches exactly the un-stemmed term
with positional order.
Assumes term is not a list. | [
"Returns",
"a",
"query",
"that",
"matches",
"exactly",
"the",
"un",
"-",
"stemmed",
"term",
"with",
"positional",
"order",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1432-L1448 | train | 39,468 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchQuery._filter_startswith | def _filter_startswith(self, term, field_name, field_type, is_not):
"""
Returns a startswith query on the un-stemmed term.
Assumes term is not a list.
"""
if field_type == 'text':
if len(term.split()) == 1:
term = '^ %s*' % term
query = self.backend.parse_query(term)
else:
term = '^ %s' % term
query = self._phrase_query(term.split(), field_name, field_type)
else:
term = '^%s*' % term
query = self.backend.parse_query(term)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
return query | python | def _filter_startswith(self, term, field_name, field_type, is_not):
"""
Returns a startswith query on the un-stemmed term.
Assumes term is not a list.
"""
if field_type == 'text':
if len(term.split()) == 1:
term = '^ %s*' % term
query = self.backend.parse_query(term)
else:
term = '^ %s' % term
query = self._phrase_query(term.split(), field_name, field_type)
else:
term = '^%s*' % term
query = self.backend.parse_query(term)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
return query | [
"def",
"_filter_startswith",
"(",
"self",
",",
"term",
",",
"field_name",
",",
"field_type",
",",
"is_not",
")",
":",
"if",
"field_type",
"==",
"'text'",
":",
"if",
"len",
"(",
"term",
".",
"split",
"(",
")",
")",
"==",
"1",
":",
"term",
"=",
"'^ %s*... | Returns a startswith query on the un-stemmed term.
Assumes term is not a list. | [
"Returns",
"a",
"startswith",
"query",
"on",
"the",
"un",
"-",
"stemmed",
"term",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1450-L1469 | train | 39,469 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchQuery._or_query | def _or_query(self, term_list, field, field_type):
"""
Joins each item of term_list decorated by _term_query with an OR.
"""
term_list = [self._term_query(term, field, field_type) for term in term_list]
return xapian.Query(xapian.Query.OP_OR, term_list) | python | def _or_query(self, term_list, field, field_type):
"""
Joins each item of term_list decorated by _term_query with an OR.
"""
term_list = [self._term_query(term, field, field_type) for term in term_list]
return xapian.Query(xapian.Query.OP_OR, term_list) | [
"def",
"_or_query",
"(",
"self",
",",
"term_list",
",",
"field",
",",
"field_type",
")",
":",
"term_list",
"=",
"[",
"self",
".",
"_term_query",
"(",
"term",
",",
"field",
",",
"field_type",
")",
"for",
"term",
"in",
"term_list",
"]",
"return",
"xapian",... | Joins each item of term_list decorated by _term_query with an OR. | [
"Joins",
"each",
"item",
"of",
"term_list",
"decorated",
"by",
"_term_query",
"with",
"an",
"OR",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1471-L1476 | train | 39,470 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchQuery._term_query | def _term_query(self, term, field_name, field_type, stemmed=True):
"""
Constructs a query of a single term.
If `field_name` is not `None`, the term is search on that field only.
If exact is `True`, the search is restricted to boolean matches.
"""
constructor = '{prefix}{term}'
# construct the prefix to be used.
prefix = ''
if field_name:
prefix = TERM_PREFIXES['field'] + field_name.upper()
term = _to_xapian_term(term)
if field_name in (ID, DJANGO_ID, DJANGO_CT):
# to ensure the value is serialized correctly.
if field_name == DJANGO_ID:
term = int(term)
term = _term_to_xapian_value(term, field_type)
return xapian.Query('%s%s' % (TERM_PREFIXES[field_name], term))
# we construct the query dates in a slightly different way
if field_type == 'datetime':
date, time = term.split()
return xapian.Query(xapian.Query.OP_AND_MAYBE,
constructor.format(prefix=prefix, term=date),
constructor.format(prefix=prefix, term=time)
)
# only use stem if field is text or "None"
if field_type not in ('text', None):
stemmed = False
unstemmed_term = constructor.format(prefix=prefix, term=term)
if stemmed:
stem = xapian.Stem(self.backend.language)
stemmed_term = 'Z' + constructor.format(prefix=prefix, term=stem(term).decode('utf-8'))
return xapian.Query(xapian.Query.OP_OR,
xapian.Query(stemmed_term),
xapian.Query(unstemmed_term)
)
else:
return xapian.Query(unstemmed_term) | python | def _term_query(self, term, field_name, field_type, stemmed=True):
"""
Constructs a query of a single term.
If `field_name` is not `None`, the term is search on that field only.
If exact is `True`, the search is restricted to boolean matches.
"""
constructor = '{prefix}{term}'
# construct the prefix to be used.
prefix = ''
if field_name:
prefix = TERM_PREFIXES['field'] + field_name.upper()
term = _to_xapian_term(term)
if field_name in (ID, DJANGO_ID, DJANGO_CT):
# to ensure the value is serialized correctly.
if field_name == DJANGO_ID:
term = int(term)
term = _term_to_xapian_value(term, field_type)
return xapian.Query('%s%s' % (TERM_PREFIXES[field_name], term))
# we construct the query dates in a slightly different way
if field_type == 'datetime':
date, time = term.split()
return xapian.Query(xapian.Query.OP_AND_MAYBE,
constructor.format(prefix=prefix, term=date),
constructor.format(prefix=prefix, term=time)
)
# only use stem if field is text or "None"
if field_type not in ('text', None):
stemmed = False
unstemmed_term = constructor.format(prefix=prefix, term=term)
if stemmed:
stem = xapian.Stem(self.backend.language)
stemmed_term = 'Z' + constructor.format(prefix=prefix, term=stem(term).decode('utf-8'))
return xapian.Query(xapian.Query.OP_OR,
xapian.Query(stemmed_term),
xapian.Query(unstemmed_term)
)
else:
return xapian.Query(unstemmed_term) | [
"def",
"_term_query",
"(",
"self",
",",
"term",
",",
"field_name",
",",
"field_type",
",",
"stemmed",
"=",
"True",
")",
":",
"constructor",
"=",
"'{prefix}{term}'",
"# construct the prefix to be used.",
"prefix",
"=",
"''",
"if",
"field_name",
":",
"prefix",
"="... | Constructs a query of a single term.
If `field_name` is not `None`, the term is search on that field only.
If exact is `True`, the search is restricted to boolean matches. | [
"Constructs",
"a",
"query",
"of",
"a",
"single",
"term",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1492-L1536 | train | 39,471 |
notanumber/xapian-haystack | xapian_backend.py | XapianSearchQuery._filter_gte | def _filter_gte(self, term, field_name, field_type, is_not):
"""
Private method that returns a xapian.Query that searches for any term
that is greater than `term` in a specified `field`.
"""
vrp = XHValueRangeProcessor(self.backend)
pos, begin, end = vrp('%s:%s' % (field_name, _term_to_xapian_value(term, field_type)), '*')
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT,
self._all_query(),
xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
)
return xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end) | python | def _filter_gte(self, term, field_name, field_type, is_not):
"""
Private method that returns a xapian.Query that searches for any term
that is greater than `term` in a specified `field`.
"""
vrp = XHValueRangeProcessor(self.backend)
pos, begin, end = vrp('%s:%s' % (field_name, _term_to_xapian_value(term, field_type)), '*')
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT,
self._all_query(),
xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end)
)
return xapian.Query(xapian.Query.OP_VALUE_RANGE, pos, begin, end) | [
"def",
"_filter_gte",
"(",
"self",
",",
"term",
",",
"field_name",
",",
"field_type",
",",
"is_not",
")",
":",
"vrp",
"=",
"XHValueRangeProcessor",
"(",
"self",
".",
"backend",
")",
"pos",
",",
"begin",
",",
"end",
"=",
"vrp",
"(",
"'%s:%s'",
"%",
"(",... | Private method that returns a xapian.Query that searches for any term
that is greater than `term` in a specified `field`. | [
"Private",
"method",
"that",
"returns",
"a",
"xapian",
".",
"Query",
"that",
"searches",
"for",
"any",
"term",
"that",
"is",
"greater",
"than",
"term",
"in",
"a",
"specified",
"field",
"."
] | 2247b23d3cb6322ce477d45f84d52da47a940348 | https://github.com/notanumber/xapian-haystack/blob/2247b23d3cb6322ce477d45f84d52da47a940348/xapian_backend.py#L1544-L1556 | train | 39,472 |
hackerlist/glassdoor | glassdoor/gd.py | get | def get(company='', company_uri=''):
"""Performs a HTTP GET for a glassdoor page and returns json"""
if not company and not company_uri:
raise Exception("glassdoor.gd.get(company='', company_uri=''): "\
" company or company_uri required")
payload = {}
if not company_uri:
payload.update({'clickSource': 'searchBtn',
'sc.keyword': company
})
uri = '%s/%s' % (GLASSDOOR_API, REVIEWS_URL)
else:
uri = '%s%s' % (GLASSDOOR_API, company_uri)
r = requests.get(uri, params=payload)
soup = BeautifulSoup(r.content)
results = parse(soup)
return results | python | def get(company='', company_uri=''):
"""Performs a HTTP GET for a glassdoor page and returns json"""
if not company and not company_uri:
raise Exception("glassdoor.gd.get(company='', company_uri=''): "\
" company or company_uri required")
payload = {}
if not company_uri:
payload.update({'clickSource': 'searchBtn',
'sc.keyword': company
})
uri = '%s/%s' % (GLASSDOOR_API, REVIEWS_URL)
else:
uri = '%s%s' % (GLASSDOOR_API, company_uri)
r = requests.get(uri, params=payload)
soup = BeautifulSoup(r.content)
results = parse(soup)
return results | [
"def",
"get",
"(",
"company",
"=",
"''",
",",
"company_uri",
"=",
"''",
")",
":",
"if",
"not",
"company",
"and",
"not",
"company_uri",
":",
"raise",
"Exception",
"(",
"\"glassdoor.gd.get(company='', company_uri=''): \"",
"\" company or company_uri required\"",
")",
... | Performs a HTTP GET for a glassdoor page and returns json | [
"Performs",
"a",
"HTTP",
"GET",
"for",
"a",
"glassdoor",
"page",
"and",
"returns",
"json"
] | 953bac53d499eca439ecd812892605d1906cb055 | https://github.com/hackerlist/glassdoor/blob/953bac53d499eca439ecd812892605d1906cb055/glassdoor/gd.py#L21-L37 | train | 39,473 |
hackerlist/glassdoor | glassdoor/gd.py | parse | def parse(soup):
"""Parses the results for a company search and return the results
if is_direct_match. If no company is found, a list of suggestions
are returned as dict. If one such recommendation is found to be an
exact match, re-perform request for this exact match
"""
if is_direct_match(soup):
return {'satisfaction': parse_satisfaction(soup),
'ceo': parse_ceo(soup),
'meta': parse_meta(soup),
'salary': parse_salary(soup)
}
suggestions = parse_suggestions(soup)
exact_match = next((s for s in suggestions if s['exact']), None)
if exact_match:
return get(company_uri=exact_match['uri'])
return suggestions | python | def parse(soup):
"""Parses the results for a company search and return the results
if is_direct_match. If no company is found, a list of suggestions
are returned as dict. If one such recommendation is found to be an
exact match, re-perform request for this exact match
"""
if is_direct_match(soup):
return {'satisfaction': parse_satisfaction(soup),
'ceo': parse_ceo(soup),
'meta': parse_meta(soup),
'salary': parse_salary(soup)
}
suggestions = parse_suggestions(soup)
exact_match = next((s for s in suggestions if s['exact']), None)
if exact_match:
return get(company_uri=exact_match['uri'])
return suggestions | [
"def",
"parse",
"(",
"soup",
")",
":",
"if",
"is_direct_match",
"(",
"soup",
")",
":",
"return",
"{",
"'satisfaction'",
":",
"parse_satisfaction",
"(",
"soup",
")",
",",
"'ceo'",
":",
"parse_ceo",
"(",
"soup",
")",
",",
"'meta'",
":",
"parse_meta",
"(",
... | Parses the results for a company search and return the results
if is_direct_match. If no company is found, a list of suggestions
are returned as dict. If one such recommendation is found to be an
exact match, re-perform request for this exact match | [
"Parses",
"the",
"results",
"for",
"a",
"company",
"search",
"and",
"return",
"the",
"results",
"if",
"is_direct_match",
".",
"If",
"no",
"company",
"is",
"found",
"a",
"list",
"of",
"suggestions",
"are",
"returned",
"as",
"dict",
".",
"If",
"one",
"such",... | 953bac53d499eca439ecd812892605d1906cb055 | https://github.com/hackerlist/glassdoor/blob/953bac53d499eca439ecd812892605d1906cb055/glassdoor/gd.py#L291-L309 | train | 39,474 |
callsign-viper/Flask-GraphQL-Auth | flask_graphql_auth/decorators.py | get_jwt_data | def get_jwt_data(token, token_type):
"""
Decodes encoded JWT token by using extension setting and validates token type
:param token: The encoded JWT string to decode
:param token_type: JWT type for type validation (access or refresh)
:return: Dictionary containing contents of the JWT
"""
jwt_data = decode_jwt(
encoded_token=token,
secret=current_app.config['JWT_SECRET_KEY'],
algorithm='HS256',
identity_claim_key=current_app.config['JWT_IDENTITY_CLAIM'],
user_claims_key=current_app.config['JWT_USER_CLAIMS']
)
# token type verification
if jwt_data['type'] != token_type:
raise WrongTokenError('Only {} tokens are allowed'.format(token_type))
return jwt_data | python | def get_jwt_data(token, token_type):
"""
Decodes encoded JWT token by using extension setting and validates token type
:param token: The encoded JWT string to decode
:param token_type: JWT type for type validation (access or refresh)
:return: Dictionary containing contents of the JWT
"""
jwt_data = decode_jwt(
encoded_token=token,
secret=current_app.config['JWT_SECRET_KEY'],
algorithm='HS256',
identity_claim_key=current_app.config['JWT_IDENTITY_CLAIM'],
user_claims_key=current_app.config['JWT_USER_CLAIMS']
)
# token type verification
if jwt_data['type'] != token_type:
raise WrongTokenError('Only {} tokens are allowed'.format(token_type))
return jwt_data | [
"def",
"get_jwt_data",
"(",
"token",
",",
"token_type",
")",
":",
"jwt_data",
"=",
"decode_jwt",
"(",
"encoded_token",
"=",
"token",
",",
"secret",
"=",
"current_app",
".",
"config",
"[",
"'JWT_SECRET_KEY'",
"]",
",",
"algorithm",
"=",
"'HS256'",
",",
"ident... | Decodes encoded JWT token by using extension setting and validates token type
:param token: The encoded JWT string to decode
:param token_type: JWT type for type validation (access or refresh)
:return: Dictionary containing contents of the JWT | [
"Decodes",
"encoded",
"JWT",
"token",
"by",
"using",
"extension",
"setting",
"and",
"validates",
"token",
"type"
] | 27897a6ef81e12f87e9695fbaf6659ff025ae782 | https://github.com/callsign-viper/Flask-GraphQL-Auth/blob/27897a6ef81e12f87e9695fbaf6659ff025ae782/flask_graphql_auth/decorators.py#L37-L57 | train | 39,475 |
callsign-viper/Flask-GraphQL-Auth | flask_graphql_auth/decorators.py | query_jwt_required | def query_jwt_required(fn):
"""
A decorator to protect a query resolver.
If you decorate an resolver with this, it will ensure that the requester
has a valid access token before allowing the resolver to be called. This
does not check the freshness of the access token.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
print(args[0])
token = kwargs.pop(current_app.config['JWT_TOKEN_ARGUMENT_NAME'])
try:
verify_jwt_in_argument(token)
except Exception as e:
return AuthInfoField(message=str(e))
return fn(*args, **kwargs)
return wrapper | python | def query_jwt_required(fn):
"""
A decorator to protect a query resolver.
If you decorate an resolver with this, it will ensure that the requester
has a valid access token before allowing the resolver to be called. This
does not check the freshness of the access token.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
print(args[0])
token = kwargs.pop(current_app.config['JWT_TOKEN_ARGUMENT_NAME'])
try:
verify_jwt_in_argument(token)
except Exception as e:
return AuthInfoField(message=str(e))
return fn(*args, **kwargs)
return wrapper | [
"def",
"query_jwt_required",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"print",
"(",
"args",
"[",
"0",
"]",
")",
"token",
"=",
"kwargs",
".",
"pop",
"(",
"current_app... | A decorator to protect a query resolver.
If you decorate an resolver with this, it will ensure that the requester
has a valid access token before allowing the resolver to be called. This
does not check the freshness of the access token. | [
"A",
"decorator",
"to",
"protect",
"a",
"query",
"resolver",
"."
] | 27897a6ef81e12f87e9695fbaf6659ff025ae782 | https://github.com/callsign-viper/Flask-GraphQL-Auth/blob/27897a6ef81e12f87e9695fbaf6659ff025ae782/flask_graphql_auth/decorators.py#L82-L100 | train | 39,476 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.open | def open(self):
"""
Open the HID device for reading and writing.
"""
if self._is_open:
raise HIDException("Failed to open device: HIDDevice already open")
path = self.path.encode('utf-8')
dev = hidapi.hid_open_path(path)
if dev:
self._is_open = True
self._device = dev
else:
raise HIDException("Failed to open device") | python | def open(self):
"""
Open the HID device for reading and writing.
"""
if self._is_open:
raise HIDException("Failed to open device: HIDDevice already open")
path = self.path.encode('utf-8')
dev = hidapi.hid_open_path(path)
if dev:
self._is_open = True
self._device = dev
else:
raise HIDException("Failed to open device") | [
"def",
"open",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_open",
":",
"raise",
"HIDException",
"(",
"\"Failed to open device: HIDDevice already open\"",
")",
"path",
"=",
"self",
".",
"path",
".",
"encode",
"(",
"'utf-8'",
")",
"dev",
"=",
"hidapi",
".",... | Open the HID device for reading and writing. | [
"Open",
"the",
"HID",
"device",
"for",
"reading",
"and",
"writing",
"."
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L112-L126 | train | 39,477 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.close | def close(self):
"""
Closes the hid device
"""
if self._is_open:
self._is_open = False
hidapi.hid_close(self._device) | python | def close(self):
"""
Closes the hid device
"""
if self._is_open:
self._is_open = False
hidapi.hid_close(self._device) | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_open",
":",
"self",
".",
"_is_open",
"=",
"False",
"hidapi",
".",
"hid_close",
"(",
"self",
".",
"_device",
")"
] | Closes the hid device | [
"Closes",
"the",
"hid",
"device"
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L129-L135 | train | 39,478 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.write | def write(self, data, report_id=0):
"""
Writes data to the HID device on its endpoint.
Parameters:
data: data to send on the HID endpoint
report_id: the report ID to use.
Returns:
The number of bytes written including the report ID.
"""
if not self._is_open:
raise HIDException("HIDDevice not open")
write_data = bytearray([report_id]) + bytearray(data)
cdata = ffi.new("const unsigned char[]", bytes(write_data))
num_written = hidapi.hid_write(self._device, cdata, len(write_data))
if num_written < 0:
raise HIDException("Failed to write to HID device: " + str(num_written))
else:
return num_written | python | def write(self, data, report_id=0):
"""
Writes data to the HID device on its endpoint.
Parameters:
data: data to send on the HID endpoint
report_id: the report ID to use.
Returns:
The number of bytes written including the report ID.
"""
if not self._is_open:
raise HIDException("HIDDevice not open")
write_data = bytearray([report_id]) + bytearray(data)
cdata = ffi.new("const unsigned char[]", bytes(write_data))
num_written = hidapi.hid_write(self._device, cdata, len(write_data))
if num_written < 0:
raise HIDException("Failed to write to HID device: " + str(num_written))
else:
return num_written | [
"def",
"write",
"(",
"self",
",",
"data",
",",
"report_id",
"=",
"0",
")",
":",
"if",
"not",
"self",
".",
"_is_open",
":",
"raise",
"HIDException",
"(",
"\"HIDDevice not open\"",
")",
"write_data",
"=",
"bytearray",
"(",
"[",
"report_id",
"]",
")",
"+",
... | Writes data to the HID device on its endpoint.
Parameters:
data: data to send on the HID endpoint
report_id: the report ID to use.
Returns:
The number of bytes written including the report ID. | [
"Writes",
"data",
"to",
"the",
"HID",
"device",
"on",
"its",
"endpoint",
"."
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L137-L158 | train | 39,479 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.read | def read(self, size=64, timeout=None):
"""
Read from the hid device on its endpoint.
Parameters:
size: number of bytes to read
timeout: length to wait in milliseconds
Returns:
The HID report read from the device. The first byte in the result
will be the report ID if used.
"""
if not self._is_open:
raise HIDException("HIDDevice not open")
data = [0] * size
cdata = ffi.new("unsigned char[]", data)
bytes_read = 0
if timeout == None:
bytes_read = hidapi.hid_read(self._device, cdata, len(cdata))
else:
bytes_read = hidapi.hid_read_timeout(self._device, cdata, len(cdata), timeout)
if bytes_read < 0:
raise HIDException("Failed to read from HID device: " + str(bytes_read))
elif bytes_read == 0:
return []
else:
return bytearray(cdata) | python | def read(self, size=64, timeout=None):
"""
Read from the hid device on its endpoint.
Parameters:
size: number of bytes to read
timeout: length to wait in milliseconds
Returns:
The HID report read from the device. The first byte in the result
will be the report ID if used.
"""
if not self._is_open:
raise HIDException("HIDDevice not open")
data = [0] * size
cdata = ffi.new("unsigned char[]", data)
bytes_read = 0
if timeout == None:
bytes_read = hidapi.hid_read(self._device, cdata, len(cdata))
else:
bytes_read = hidapi.hid_read_timeout(self._device, cdata, len(cdata), timeout)
if bytes_read < 0:
raise HIDException("Failed to read from HID device: " + str(bytes_read))
elif bytes_read == 0:
return []
else:
return bytearray(cdata) | [
"def",
"read",
"(",
"self",
",",
"size",
"=",
"64",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_is_open",
":",
"raise",
"HIDException",
"(",
"\"HIDDevice not open\"",
")",
"data",
"=",
"[",
"0",
"]",
"*",
"size",
"cdata",
"=",
... | Read from the hid device on its endpoint.
Parameters:
size: number of bytes to read
timeout: length to wait in milliseconds
Returns:
The HID report read from the device. The first byte in the result
will be the report ID if used. | [
"Read",
"from",
"the",
"hid",
"device",
"on",
"its",
"endpoint",
"."
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L160-L192 | train | 39,480 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.is_connected | def is_connected(self):
"""
Checks if the USB device is still connected
"""
if self._is_open:
err = hidapi.hid_read_timeout(self._device, ffi.NULL, 0, 0)
if err == -1:
return False
else:
return True
else:
en = Enumeration(vid=self.vendor_id, pid=self.product_id).find(path=self.path)
if len(en) == 0:
return False
else:
return True | python | def is_connected(self):
"""
Checks if the USB device is still connected
"""
if self._is_open:
err = hidapi.hid_read_timeout(self._device, ffi.NULL, 0, 0)
if err == -1:
return False
else:
return True
else:
en = Enumeration(vid=self.vendor_id, pid=self.product_id).find(path=self.path)
if len(en) == 0:
return False
else:
return True | [
"def",
"is_connected",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_open",
":",
"err",
"=",
"hidapi",
".",
"hid_read_timeout",
"(",
"self",
".",
"_device",
",",
"ffi",
".",
"NULL",
",",
"0",
",",
"0",
")",
"if",
"err",
"==",
"-",
"1",
":",
"ret... | Checks if the USB device is still connected | [
"Checks",
"if",
"the",
"USB",
"device",
"is",
"still",
"connected"
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L206-L221 | train | 39,481 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.send_feature_report | def send_feature_report(self, data, report_id=0x00):
"""
Send a Feature report to a HID device.
Feature reports are sent over the Control endpoint as a Set_Report
transfer.
Parameters:
data The data to send
Returns:
This function returns the actual number of bytes written
"""
if not self._is_open:
raise HIDException("HIDDevice not open")
report = bytearray([report_id]) + bytearray(data)
cdata = ffi.new("const unsigned char[]", bytes(report))
bytes_written = hidapi.hid_send_feature_report(self._device, cdata, len(report))
if bytes_written == -1:
raise HIDException("Failed to send feature report to HID device")
return bytes_written | python | def send_feature_report(self, data, report_id=0x00):
"""
Send a Feature report to a HID device.
Feature reports are sent over the Control endpoint as a Set_Report
transfer.
Parameters:
data The data to send
Returns:
This function returns the actual number of bytes written
"""
if not self._is_open:
raise HIDException("HIDDevice not open")
report = bytearray([report_id]) + bytearray(data)
cdata = ffi.new("const unsigned char[]", bytes(report))
bytes_written = hidapi.hid_send_feature_report(self._device, cdata, len(report))
if bytes_written == -1:
raise HIDException("Failed to send feature report to HID device")
return bytes_written | [
"def",
"send_feature_report",
"(",
"self",
",",
"data",
",",
"report_id",
"=",
"0x00",
")",
":",
"if",
"not",
"self",
".",
"_is_open",
":",
"raise",
"HIDException",
"(",
"\"HIDDevice not open\"",
")",
"report",
"=",
"bytearray",
"(",
"[",
"report_id",
"]",
... | Send a Feature report to a HID device.
Feature reports are sent over the Control endpoint as a Set_Report
transfer.
Parameters:
data The data to send
Returns:
This function returns the actual number of bytes written | [
"Send",
"a",
"Feature",
"report",
"to",
"a",
"HID",
"device",
"."
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L223-L246 | train | 39,482 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.get_feature_report | def get_feature_report(self, size, report_id=0x00):
"""
Get a feature report from a HID device.
Feature reports are sent over the Control endpoint as a Get_Report
transfer.
Parameters:
size The number of bytes to read.
report_id The report id to read
Returns:
They bytes read from the HID report
"""
data = [0] * (size+1)
cdata = ffi.new("unsigned char[]", bytes(data))
cdata[0] = report_id
bytes_read = hidapi.hid_get_feature_report(self._device, cdata, len(cdata))
if bytes_read == -1:
raise HIDException("Failed to get feature report from HID device")
return bytearray(cdata[1:size+1]) | python | def get_feature_report(self, size, report_id=0x00):
"""
Get a feature report from a HID device.
Feature reports are sent over the Control endpoint as a Get_Report
transfer.
Parameters:
size The number of bytes to read.
report_id The report id to read
Returns:
They bytes read from the HID report
"""
data = [0] * (size+1)
cdata = ffi.new("unsigned char[]", bytes(data))
cdata[0] = report_id
bytes_read = hidapi.hid_get_feature_report(self._device, cdata, len(cdata))
if bytes_read == -1:
raise HIDException("Failed to get feature report from HID device")
return bytearray(cdata[1:size+1]) | [
"def",
"get_feature_report",
"(",
"self",
",",
"size",
",",
"report_id",
"=",
"0x00",
")",
":",
"data",
"=",
"[",
"0",
"]",
"*",
"(",
"size",
"+",
"1",
")",
"cdata",
"=",
"ffi",
".",
"new",
"(",
"\"unsigned char[]\"",
",",
"bytes",
"(",
"data",
")"... | Get a feature report from a HID device.
Feature reports are sent over the Control endpoint as a Get_Report
transfer.
Parameters:
size The number of bytes to read.
report_id The report id to read
Returns:
They bytes read from the HID report | [
"Get",
"a",
"feature",
"report",
"from",
"a",
"HID",
"device",
"."
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L248-L271 | train | 39,483 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.get_error | def get_error(self):
"""
Get an error string from the device
"""
err_str = hidapi.hid_error(self._device)
if err_str == ffi.NULL:
return None
else:
return ffi.string(err_str) | python | def get_error(self):
"""
Get an error string from the device
"""
err_str = hidapi.hid_error(self._device)
if err_str == ffi.NULL:
return None
else:
return ffi.string(err_str) | [
"def",
"get_error",
"(",
"self",
")",
":",
"err_str",
"=",
"hidapi",
".",
"hid_error",
"(",
"self",
".",
"_device",
")",
"if",
"err_str",
"==",
"ffi",
".",
"NULL",
":",
"return",
"None",
"else",
":",
"return",
"ffi",
".",
"string",
"(",
"err_str",
")... | Get an error string from the device | [
"Get",
"an",
"error",
"string",
"from",
"the",
"device"
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L273-L281 | train | 39,484 |
ahtn/python-easyhid | easyhid/easyhid.py | HIDDevice.get_indexed_string | def get_indexed_string(self, index):
"""
Get the string with the given index from the device
"""
max_len = 128
str_buf = ffi.new("wchar_t[]", str(bytearray(max_len)))
ret = hidapi.hid_get_indexed_string(self._device, index, str_buf, max_len)
if ret < 0:
raise HIDException(self._device.get_error())
elif ret == 0:
return None
else:
return ffi.string(str_buf).encode('utf-8') | python | def get_indexed_string(self, index):
"""
Get the string with the given index from the device
"""
max_len = 128
str_buf = ffi.new("wchar_t[]", str(bytearray(max_len)))
ret = hidapi.hid_get_indexed_string(self._device, index, str_buf, max_len)
if ret < 0:
raise HIDException(self._device.get_error())
elif ret == 0:
return None
else:
return ffi.string(str_buf).encode('utf-8') | [
"def",
"get_indexed_string",
"(",
"self",
",",
"index",
")",
":",
"max_len",
"=",
"128",
"str_buf",
"=",
"ffi",
".",
"new",
"(",
"\"wchar_t[]\"",
",",
"str",
"(",
"bytearray",
"(",
"max_len",
")",
")",
")",
"ret",
"=",
"hidapi",
".",
"hid_get_indexed_str... | Get the string with the given index from the device | [
"Get",
"the",
"string",
"with",
"the",
"given",
"index",
"from",
"the",
"device"
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L313-L326 | train | 39,485 |
ahtn/python-easyhid | easyhid/easyhid.py | Enumeration.find | def find(self, vid=None, pid=None, serial=None, interface=None, \
path=None, release_number=None, manufacturer=None,
product=None, usage=None, usage_page=None):
"""
Attempts to open a device in this `Enumeration` object. Optional
arguments can be provided to filter the resulting list based on various
parameters of the HID devices.
Args:
vid: filters by USB Vendor ID
pid: filters by USB Product ID
serial: filters by USB serial string (.iSerialNumber)
interface: filters by interface number (bInterfaceNumber)
release_number: filters by the USB release number (.bcdDevice)
manufacturer: filters by USB manufacturer string (.iManufacturer)
product: filters by USB product string (.iProduct)
usage: filters by HID usage
usage_page: filters by HID usage_page
path: filters by HID API path.
"""
result = []
for dev in self.device_list:
if vid not in [0, None] and dev.vendor_id != vid:
continue
if pid not in [0, None] and dev.product_id != pid:
continue
if serial and dev.serial_number != serial:
continue
if path and dev.path != path:
continue
if manufacturer and dev.manufacturer_string != manufacturer:
continue
if product and dev.product_string != product:
continue
if release_number != None and dev.release_number != release_number:
continue
if interface != None and dev.interface_number != interface:
continue
if usage != None and dev.usage != usage:
continue
if usage_page != None and dev.usage_page != usage_page:
continue
result.append(dev)
return result | python | def find(self, vid=None, pid=None, serial=None, interface=None, \
path=None, release_number=None, manufacturer=None,
product=None, usage=None, usage_page=None):
"""
Attempts to open a device in this `Enumeration` object. Optional
arguments can be provided to filter the resulting list based on various
parameters of the HID devices.
Args:
vid: filters by USB Vendor ID
pid: filters by USB Product ID
serial: filters by USB serial string (.iSerialNumber)
interface: filters by interface number (bInterfaceNumber)
release_number: filters by the USB release number (.bcdDevice)
manufacturer: filters by USB manufacturer string (.iManufacturer)
product: filters by USB product string (.iProduct)
usage: filters by HID usage
usage_page: filters by HID usage_page
path: filters by HID API path.
"""
result = []
for dev in self.device_list:
if vid not in [0, None] and dev.vendor_id != vid:
continue
if pid not in [0, None] and dev.product_id != pid:
continue
if serial and dev.serial_number != serial:
continue
if path and dev.path != path:
continue
if manufacturer and dev.manufacturer_string != manufacturer:
continue
if product and dev.product_string != product:
continue
if release_number != None and dev.release_number != release_number:
continue
if interface != None and dev.interface_number != interface:
continue
if usage != None and dev.usage != usage:
continue
if usage_page != None and dev.usage_page != usage_page:
continue
result.append(dev)
return result | [
"def",
"find",
"(",
"self",
",",
"vid",
"=",
"None",
",",
"pid",
"=",
"None",
",",
"serial",
"=",
"None",
",",
"interface",
"=",
"None",
",",
"path",
"=",
"None",
",",
"release_number",
"=",
"None",
",",
"manufacturer",
"=",
"None",
",",
"product",
... | Attempts to open a device in this `Enumeration` object. Optional
arguments can be provided to filter the resulting list based on various
parameters of the HID devices.
Args:
vid: filters by USB Vendor ID
pid: filters by USB Product ID
serial: filters by USB serial string (.iSerialNumber)
interface: filters by interface number (bInterfaceNumber)
release_number: filters by the USB release number (.bcdDevice)
manufacturer: filters by USB manufacturer string (.iManufacturer)
product: filters by USB product string (.iProduct)
usage: filters by HID usage
usage_page: filters by HID usage_page
path: filters by HID API path. | [
"Attempts",
"to",
"open",
"a",
"device",
"in",
"this",
"Enumeration",
"object",
".",
"Optional",
"arguments",
"can",
"be",
"provided",
"to",
"filter",
"the",
"resulting",
"list",
"based",
"on",
"various",
"parameters",
"of",
"the",
"HID",
"devices",
"."
] | b89a60e5b378495b34c51ef11c5260bb43885780 | https://github.com/ahtn/python-easyhid/blob/b89a60e5b378495b34c51ef11c5260bb43885780/easyhid/easyhid.py#L367-L411 | train | 39,486 |
ladybug-tools/uwg | uwg/utilities.py | str2fl | def str2fl(x):
"""Recurses through lists and converts lists of string to float
Args:
x: string or list of strings
"""
def helper_to_fl(s_):
""" deals with odd string imports converts to float"""
if s_ == "":
return "null"
elif "," in s_:
s_ = s_.replace(",", "")
try:
return float(s_)
except:
return (s_)
fl_lst = []
if isinstance(x[0], str): # Check if list of strings, then sent to conversion
for xi in range(len(x)):
fl_lst.append(helper_to_fl(x[xi]))
elif isinstance(x[0], list): # Check if list of lists, then recurse
for xi in range(len(x)):
fl_lst.append(str2fl(x[xi]))
else:
return False
return fl_lst | python | def str2fl(x):
"""Recurses through lists and converts lists of string to float
Args:
x: string or list of strings
"""
def helper_to_fl(s_):
""" deals with odd string imports converts to float"""
if s_ == "":
return "null"
elif "," in s_:
s_ = s_.replace(",", "")
try:
return float(s_)
except:
return (s_)
fl_lst = []
if isinstance(x[0], str): # Check if list of strings, then sent to conversion
for xi in range(len(x)):
fl_lst.append(helper_to_fl(x[xi]))
elif isinstance(x[0], list): # Check if list of lists, then recurse
for xi in range(len(x)):
fl_lst.append(str2fl(x[xi]))
else:
return False
return fl_lst | [
"def",
"str2fl",
"(",
"x",
")",
":",
"def",
"helper_to_fl",
"(",
"s_",
")",
":",
"\"\"\" deals with odd string imports converts to float\"\"\"",
"if",
"s_",
"==",
"\"\"",
":",
"return",
"\"null\"",
"elif",
"\",\"",
"in",
"s_",
":",
"s_",
"=",
"s_",
".",
"rep... | Recurses through lists and converts lists of string to float
Args:
x: string or list of strings | [
"Recurses",
"through",
"lists",
"and",
"converts",
"lists",
"of",
"string",
"to",
"float"
] | fb71f656b3cb69e7ccf1d851dff862e14fa210fc | https://github.com/ladybug-tools/uwg/blob/fb71f656b3cb69e7ccf1d851dff862e14fa210fc/uwg/utilities.py#L42-L70 | train | 39,487 |
ladybug-tools/uwg | uwg/uwg.py | procMat | def procMat(materials, max_thickness, min_thickness):
""" Processes material layer so that a material with single
layer thickness is divided into two and material layer that is too
thick is subdivided
"""
newmat = []
newthickness = []
k = materials.layerThermalCond
Vhc = materials.layerVolHeat
if len(materials.layerThickness) > 1:
for j in range(len(materials.layerThickness)):
# Break up each layer that's more than max thickness (0.05m)
if materials.layerThickness[j] > max_thickness:
nlayers = math.ceil(materials.layerThickness[j]/float(max_thickness))
for i in range(int(nlayers)):
newmat.append(Material(k[j], Vhc[j], name=materials._name))
newthickness.append(materials.layerThickness[j]/float(nlayers))
# Material that's less then min_thickness is not added.
elif materials.layerThickness[j] < min_thickness:
print("WARNING: Material '{}' layer found too thin (<{:.2f}cm), ignored.").format(
materials._name, min_thickness*100)
else:
newmat.append(Material(k[j], Vhc[j], name=materials._name))
newthickness.append(materials.layerThickness[j])
else:
# Divide single layer into two (uwg assumes at least 2 layers)
if materials.layerThickness[0] > max_thickness:
nlayers = math.ceil(materials.layerThickness[0]/float(max_thickness))
for i in range(int(nlayers)):
newmat.append(Material(k[0], Vhc[0], name=materials._name))
newthickness.append(materials.layerThickness[0]/float(nlayers))
# Material should be at least 1cm thick, so if we're here,
# should give warning and stop. Only warning given for now.
elif materials.layerThickness[0] < min_thickness*2:
newthickness = [min_thickness/2., min_thickness/2.]
newmat = [Material(k[0], Vhc[0], name=materials._name),
Material(k[0], Vhc[0], name=materials._name)]
print("WARNING: a thin (<2cm) single material '{}' layer found. May cause error.".format(
materials._name))
else:
newthickness = [materials.layerThickness[0]/2., materials.layerThickness[0]/2.]
newmat = [Material(k[0], Vhc[0], name=materials._name),
Material(k[0], Vhc[0], name=materials._name)]
return newmat, newthickness | python | def procMat(materials, max_thickness, min_thickness):
""" Processes material layer so that a material with single
layer thickness is divided into two and material layer that is too
thick is subdivided
"""
newmat = []
newthickness = []
k = materials.layerThermalCond
Vhc = materials.layerVolHeat
if len(materials.layerThickness) > 1:
for j in range(len(materials.layerThickness)):
# Break up each layer that's more than max thickness (0.05m)
if materials.layerThickness[j] > max_thickness:
nlayers = math.ceil(materials.layerThickness[j]/float(max_thickness))
for i in range(int(nlayers)):
newmat.append(Material(k[j], Vhc[j], name=materials._name))
newthickness.append(materials.layerThickness[j]/float(nlayers))
# Material that's less then min_thickness is not added.
elif materials.layerThickness[j] < min_thickness:
print("WARNING: Material '{}' layer found too thin (<{:.2f}cm), ignored.").format(
materials._name, min_thickness*100)
else:
newmat.append(Material(k[j], Vhc[j], name=materials._name))
newthickness.append(materials.layerThickness[j])
else:
# Divide single layer into two (uwg assumes at least 2 layers)
if materials.layerThickness[0] > max_thickness:
nlayers = math.ceil(materials.layerThickness[0]/float(max_thickness))
for i in range(int(nlayers)):
newmat.append(Material(k[0], Vhc[0], name=materials._name))
newthickness.append(materials.layerThickness[0]/float(nlayers))
# Material should be at least 1cm thick, so if we're here,
# should give warning and stop. Only warning given for now.
elif materials.layerThickness[0] < min_thickness*2:
newthickness = [min_thickness/2., min_thickness/2.]
newmat = [Material(k[0], Vhc[0], name=materials._name),
Material(k[0], Vhc[0], name=materials._name)]
print("WARNING: a thin (<2cm) single material '{}' layer found. May cause error.".format(
materials._name))
else:
newthickness = [materials.layerThickness[0]/2., materials.layerThickness[0]/2.]
newmat = [Material(k[0], Vhc[0], name=materials._name),
Material(k[0], Vhc[0], name=materials._name)]
return newmat, newthickness | [
"def",
"procMat",
"(",
"materials",
",",
"max_thickness",
",",
"min_thickness",
")",
":",
"newmat",
"=",
"[",
"]",
"newthickness",
"=",
"[",
"]",
"k",
"=",
"materials",
".",
"layerThermalCond",
"Vhc",
"=",
"materials",
".",
"layerVolHeat",
"if",
"len",
"("... | Processes material layer so that a material with single
layer thickness is divided into two and material layer that is too
thick is subdivided | [
"Processes",
"material",
"layer",
"so",
"that",
"a",
"material",
"with",
"single",
"layer",
"thickness",
"is",
"divided",
"into",
"two",
"and",
"material",
"layer",
"that",
"is",
"too",
"thick",
"is",
"subdivided"
] | fb71f656b3cb69e7ccf1d851dff862e14fa210fc | https://github.com/ladybug-tools/uwg/blob/fb71f656b3cb69e7ccf1d851dff862e14fa210fc/uwg/uwg.py#L977-L1024 | train | 39,488 |
ladybug-tools/uwg | uwg/uwg.py | uwg.set_input | def set_input(self):
""" Set inputs from .uwg input file if not already defined, the check if all
the required input parameters are there.
"""
# If a uwgParamFileName is set, then read inputs from .uwg file.
# User-defined class properties will override the inputs from the .uwg file.
if self.uwgParamFileName is not None:
print("\nReading uwg file input.")
self.read_input()
else:
print("\nNo .uwg file input.")
self.check_required_inputs()
# Modify zone to be used as python index
self.zone = int(self.zone)-1 | python | def set_input(self):
""" Set inputs from .uwg input file if not already defined, the check if all
the required input parameters are there.
"""
# If a uwgParamFileName is set, then read inputs from .uwg file.
# User-defined class properties will override the inputs from the .uwg file.
if self.uwgParamFileName is not None:
print("\nReading uwg file input.")
self.read_input()
else:
print("\nNo .uwg file input.")
self.check_required_inputs()
# Modify zone to be used as python index
self.zone = int(self.zone)-1 | [
"def",
"set_input",
"(",
"self",
")",
":",
"# If a uwgParamFileName is set, then read inputs from .uwg file.\r",
"# User-defined class properties will override the inputs from the .uwg file.\r",
"if",
"self",
".",
"uwgParamFileName",
"is",
"not",
"None",
":",
"print",
"(",
"\"\\n... | Set inputs from .uwg input file if not already defined, the check if all
the required input parameters are there. | [
"Set",
"inputs",
"from",
".",
"uwg",
"input",
"file",
"if",
"not",
"already",
"defined",
"the",
"check",
"if",
"all",
"the",
"required",
"input",
"parameters",
"are",
"there",
"."
] | fb71f656b3cb69e7ccf1d851dff862e14fa210fc | https://github.com/ladybug-tools/uwg/blob/fb71f656b3cb69e7ccf1d851dff862e14fa210fc/uwg/uwg.py#L532-L548 | train | 39,489 |
ladybug-tools/uwg | uwg/uwg.py | uwg.write_epw | def write_epw(self):
""" Section 8 - Writing new EPW file
"""
epw_prec = self.epw_precision # precision of epw file input
for iJ in range(len(self.UCMData)):
# [iJ+self.simTime.timeInitial-8] = increments along every weather timestep in epw
# [6 to 21] = column data of epw
self.epwinput[iJ+self.simTime.timeInitial-8][6] = "{0:.{1}f}".format(
self.UCMData[iJ].canTemp - 273.15, epw_prec) # dry bulb temperature [?C]
# dew point temperature [?C]
self.epwinput[iJ+self.simTime.timeInitial -
8][7] = "{0:.{1}f}".format(self.UCMData[iJ].Tdp, epw_prec)
# relative humidity [%]
self.epwinput[iJ+self.simTime.timeInitial -
8][8] = "{0:.{1}f}".format(self.UCMData[iJ].canRHum, epw_prec)
self.epwinput[iJ+self.simTime.timeInitial-8][21] = "{0:.{1}f}".format(
self.WeatherData[iJ].wind, epw_prec) # wind speed [m/s]
# Writing new EPW file
epw_new_id = open(self.newPathName, "w")
for i in range(8):
new_epw_line = '{}\n'.format(reduce(lambda x, y: x+","+y, self._header[i]))
epw_new_id.write(new_epw_line)
for i in range(len(self.epwinput)):
printme = ""
for ei in range(34):
printme += "{}".format(self.epwinput[i][ei]) + ','
printme = printme + "{}".format(self.epwinput[i][ei])
new_epw_line = "{0}\n".format(printme)
epw_new_id.write(new_epw_line)
epw_new_id.close()
print("New climate file '{}' is generated at {}.".format(
self.destinationFileName, self.destinationDir)) | python | def write_epw(self):
""" Section 8 - Writing new EPW file
"""
epw_prec = self.epw_precision # precision of epw file input
for iJ in range(len(self.UCMData)):
# [iJ+self.simTime.timeInitial-8] = increments along every weather timestep in epw
# [6 to 21] = column data of epw
self.epwinput[iJ+self.simTime.timeInitial-8][6] = "{0:.{1}f}".format(
self.UCMData[iJ].canTemp - 273.15, epw_prec) # dry bulb temperature [?C]
# dew point temperature [?C]
self.epwinput[iJ+self.simTime.timeInitial -
8][7] = "{0:.{1}f}".format(self.UCMData[iJ].Tdp, epw_prec)
# relative humidity [%]
self.epwinput[iJ+self.simTime.timeInitial -
8][8] = "{0:.{1}f}".format(self.UCMData[iJ].canRHum, epw_prec)
self.epwinput[iJ+self.simTime.timeInitial-8][21] = "{0:.{1}f}".format(
self.WeatherData[iJ].wind, epw_prec) # wind speed [m/s]
# Writing new EPW file
epw_new_id = open(self.newPathName, "w")
for i in range(8):
new_epw_line = '{}\n'.format(reduce(lambda x, y: x+","+y, self._header[i]))
epw_new_id.write(new_epw_line)
for i in range(len(self.epwinput)):
printme = ""
for ei in range(34):
printme += "{}".format(self.epwinput[i][ei]) + ','
printme = printme + "{}".format(self.epwinput[i][ei])
new_epw_line = "{0}\n".format(printme)
epw_new_id.write(new_epw_line)
epw_new_id.close()
print("New climate file '{}' is generated at {}.".format(
self.destinationFileName, self.destinationDir)) | [
"def",
"write_epw",
"(",
"self",
")",
":",
"epw_prec",
"=",
"self",
".",
"epw_precision",
"# precision of epw file input\r",
"for",
"iJ",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"UCMData",
")",
")",
":",
"# [iJ+self.simTime.timeInitial-8] = increments along eve... | Section 8 - Writing new EPW file | [
"Section",
"8",
"-",
"Writing",
"new",
"EPW",
"file"
] | fb71f656b3cb69e7ccf1d851dff862e14fa210fc | https://github.com/ladybug-tools/uwg/blob/fb71f656b3cb69e7ccf1d851dff862e14fa210fc/uwg/uwg.py#L926-L963 | train | 39,490 |
ladybug-tools/uwg | uwg/element.py | Element.SurfFlux | def SurfFlux(self,forc,parameter,simTime,humRef,tempRef,windRef,boundCond,intFlux):
""" Calculate net heat flux, and update element layer temperatures
"""
# Calculated per unit area (m^2)
dens = forc.pres/(1000*0.287042*tempRef*(1.+1.607858*humRef)) # air density (kgd m-3)
self.aeroCond = 5.8 + 3.7 * windRef # Convection coef (ref: uwg, eq. 12))
if (self.horizontal): # For roof, mass, road
# Evaporation (m s-1), Film water & soil latent heat
if not self.is_near_zero(self.waterStorage) and self.waterStorage > 0.0:
# N.B In the current uwg code, latent heat from evapotranspiration, stagnant water,
# or anthropogenic sources is not modelled due to the difficulty of validation, and
# lack of reliability of precipitation data from EPW files.Therefore this condition
# is never run because all elements have had their waterStorage hardcoded to 0.
qtsat = self.qsat([self.layerTemp[0]],[forc.pres],parameter)[0]
eg = self.aeroCond*parameter.colburn*dens*(qtsat-humRef)/parameter.waterDens/parameter.cp
self.waterStorage = min(self.waterStorage + simTime.dt*(forc.prec-eg),parameter.wgmax)
self.waterStorage = max(self.waterStorage,0.) # (m)
else:
eg = 0.
soilLat = eg*parameter.waterDens*parameter.lv
# Winter, no veg
if simTime.month < parameter.vegStart and simTime.month > parameter.vegEnd:
self.solAbs = (1.-self.albedo)*self.solRec # (W m-2)
vegLat = 0.
vegSens = 0.
else: # Summer, veg
self.solAbs = ((1.-self.vegCoverage)*(1.-self.albedo)+self.vegCoverage*(1.-parameter.vegAlbedo))*self.solRec
vegLat = self.vegCoverage*parameter.grassFLat*(1.-parameter.vegAlbedo)*self.solRec
vegSens = self.vegCoverage*(1.-parameter.grassFLat)*(1.-parameter.vegAlbedo)*self.solRec
self.lat = soilLat + vegLat
# Sensible & net heat flux
self.sens = vegSens + self.aeroCond*(self.layerTemp[0]-tempRef)
self.flux = -self.sens + self.solAbs + self.infra - self.lat # (W m-2)
else: # For vertical surfaces (wall)
self.solAbs = (1.-self.albedo)*self.solRec
self.lat = 0.
# Sensible & net heat flux
self.sens = self.aeroCond*(self.layerTemp[0]-tempRef)
self.flux = -self.sens + self.solAbs + self.infra - self.lat # (W m-2)
self.layerTemp = self.Conduction(simTime.dt, self.flux, boundCond, forc.deepTemp, intFlux)
self.T_ext = self.layerTemp[0]
self.T_int = self.layerTemp[-1] | python | def SurfFlux(self,forc,parameter,simTime,humRef,tempRef,windRef,boundCond,intFlux):
""" Calculate net heat flux, and update element layer temperatures
"""
# Calculated per unit area (m^2)
dens = forc.pres/(1000*0.287042*tempRef*(1.+1.607858*humRef)) # air density (kgd m-3)
self.aeroCond = 5.8 + 3.7 * windRef # Convection coef (ref: uwg, eq. 12))
if (self.horizontal): # For roof, mass, road
# Evaporation (m s-1), Film water & soil latent heat
if not self.is_near_zero(self.waterStorage) and self.waterStorage > 0.0:
# N.B In the current uwg code, latent heat from evapotranspiration, stagnant water,
# or anthropogenic sources is not modelled due to the difficulty of validation, and
# lack of reliability of precipitation data from EPW files.Therefore this condition
# is never run because all elements have had their waterStorage hardcoded to 0.
qtsat = self.qsat([self.layerTemp[0]],[forc.pres],parameter)[0]
eg = self.aeroCond*parameter.colburn*dens*(qtsat-humRef)/parameter.waterDens/parameter.cp
self.waterStorage = min(self.waterStorage + simTime.dt*(forc.prec-eg),parameter.wgmax)
self.waterStorage = max(self.waterStorage,0.) # (m)
else:
eg = 0.
soilLat = eg*parameter.waterDens*parameter.lv
# Winter, no veg
if simTime.month < parameter.vegStart and simTime.month > parameter.vegEnd:
self.solAbs = (1.-self.albedo)*self.solRec # (W m-2)
vegLat = 0.
vegSens = 0.
else: # Summer, veg
self.solAbs = ((1.-self.vegCoverage)*(1.-self.albedo)+self.vegCoverage*(1.-parameter.vegAlbedo))*self.solRec
vegLat = self.vegCoverage*parameter.grassFLat*(1.-parameter.vegAlbedo)*self.solRec
vegSens = self.vegCoverage*(1.-parameter.grassFLat)*(1.-parameter.vegAlbedo)*self.solRec
self.lat = soilLat + vegLat
# Sensible & net heat flux
self.sens = vegSens + self.aeroCond*(self.layerTemp[0]-tempRef)
self.flux = -self.sens + self.solAbs + self.infra - self.lat # (W m-2)
else: # For vertical surfaces (wall)
self.solAbs = (1.-self.albedo)*self.solRec
self.lat = 0.
# Sensible & net heat flux
self.sens = self.aeroCond*(self.layerTemp[0]-tempRef)
self.flux = -self.sens + self.solAbs + self.infra - self.lat # (W m-2)
self.layerTemp = self.Conduction(simTime.dt, self.flux, boundCond, forc.deepTemp, intFlux)
self.T_ext = self.layerTemp[0]
self.T_int = self.layerTemp[-1] | [
"def",
"SurfFlux",
"(",
"self",
",",
"forc",
",",
"parameter",
",",
"simTime",
",",
"humRef",
",",
"tempRef",
",",
"windRef",
",",
"boundCond",
",",
"intFlux",
")",
":",
"# Calculated per unit area (m^2)",
"dens",
"=",
"forc",
".",
"pres",
"/",
"(",
"1000"... | Calculate net heat flux, and update element layer temperatures | [
"Calculate",
"net",
"heat",
"flux",
"and",
"update",
"element",
"layer",
"temperatures"
] | fb71f656b3cb69e7ccf1d851dff862e14fa210fc | https://github.com/ladybug-tools/uwg/blob/fb71f656b3cb69e7ccf1d851dff862e14fa210fc/uwg/element.py#L97-L145 | train | 39,491 |
wooster/biplist | biplist/__init__.py | readPlist | def readPlist(pathOrFile):
"""Raises NotBinaryPlistException, InvalidPlistException"""
didOpen = False
result = None
if isinstance(pathOrFile, (bytes, unicode)):
pathOrFile = open(pathOrFile, 'rb')
didOpen = True
try:
reader = PlistReader(pathOrFile)
result = reader.parse()
except NotBinaryPlistException as e:
try:
pathOrFile.seek(0)
result = None
if hasattr(plistlib, 'loads'):
contents = None
if isinstance(pathOrFile, (bytes, unicode)):
with open(pathOrFile, 'rb') as f:
contents = f.read()
else:
contents = pathOrFile.read()
result = plistlib.loads(contents)
else:
result = plistlib.readPlist(pathOrFile)
result = wrapDataObject(result, for_binary=True)
except Exception as e:
raise InvalidPlistException(e)
finally:
if didOpen:
pathOrFile.close()
return result | python | def readPlist(pathOrFile):
"""Raises NotBinaryPlistException, InvalidPlistException"""
didOpen = False
result = None
if isinstance(pathOrFile, (bytes, unicode)):
pathOrFile = open(pathOrFile, 'rb')
didOpen = True
try:
reader = PlistReader(pathOrFile)
result = reader.parse()
except NotBinaryPlistException as e:
try:
pathOrFile.seek(0)
result = None
if hasattr(plistlib, 'loads'):
contents = None
if isinstance(pathOrFile, (bytes, unicode)):
with open(pathOrFile, 'rb') as f:
contents = f.read()
else:
contents = pathOrFile.read()
result = plistlib.loads(contents)
else:
result = plistlib.readPlist(pathOrFile)
result = wrapDataObject(result, for_binary=True)
except Exception as e:
raise InvalidPlistException(e)
finally:
if didOpen:
pathOrFile.close()
return result | [
"def",
"readPlist",
"(",
"pathOrFile",
")",
":",
"didOpen",
"=",
"False",
"result",
"=",
"None",
"if",
"isinstance",
"(",
"pathOrFile",
",",
"(",
"bytes",
",",
"unicode",
")",
")",
":",
"pathOrFile",
"=",
"open",
"(",
"pathOrFile",
",",
"'rb'",
")",
"d... | Raises NotBinaryPlistException, InvalidPlistException | [
"Raises",
"NotBinaryPlistException",
"InvalidPlistException"
] | 4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82 | https://github.com/wooster/biplist/blob/4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82/biplist/__init__.py#L117-L147 | train | 39,492 |
wooster/biplist | biplist/__init__.py | PlistReader.getSizedInteger | def getSizedInteger(self, data, byteSize, as_number=False):
"""Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise."""
result = 0
if byteSize == 0:
raise InvalidPlistException("Encountered integer with byte size of 0.")
# 1, 2, and 4 byte integers are unsigned
elif byteSize == 1:
result = unpack('>B', data)[0]
elif byteSize == 2:
result = unpack('>H', data)[0]
elif byteSize == 4:
result = unpack('>L', data)[0]
elif byteSize == 8:
if as_number:
result = unpack('>q', data)[0]
else:
result = unpack('>Q', data)[0]
elif byteSize <= 16:
# Handle odd-sized or integers larger than 8 bytes
# Don't naively go over 16 bytes, in order to prevent infinite loops.
result = 0
if hasattr(int, 'from_bytes'):
result = int.from_bytes(data, 'big')
else:
for byte in data:
if not isinstance(byte, int): # Python3.0-3.1.x return ints, 2.x return str
byte = unpack_from('>B', byte)[0]
result = (result << 8) | byte
else:
raise InvalidPlistException("Encountered integer longer than 16 bytes.")
return result | python | def getSizedInteger(self, data, byteSize, as_number=False):
"""Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise."""
result = 0
if byteSize == 0:
raise InvalidPlistException("Encountered integer with byte size of 0.")
# 1, 2, and 4 byte integers are unsigned
elif byteSize == 1:
result = unpack('>B', data)[0]
elif byteSize == 2:
result = unpack('>H', data)[0]
elif byteSize == 4:
result = unpack('>L', data)[0]
elif byteSize == 8:
if as_number:
result = unpack('>q', data)[0]
else:
result = unpack('>Q', data)[0]
elif byteSize <= 16:
# Handle odd-sized or integers larger than 8 bytes
# Don't naively go over 16 bytes, in order to prevent infinite loops.
result = 0
if hasattr(int, 'from_bytes'):
result = int.from_bytes(data, 'big')
else:
for byte in data:
if not isinstance(byte, int): # Python3.0-3.1.x return ints, 2.x return str
byte = unpack_from('>B', byte)[0]
result = (result << 8) | byte
else:
raise InvalidPlistException("Encountered integer longer than 16 bytes.")
return result | [
"def",
"getSizedInteger",
"(",
"self",
",",
"data",
",",
"byteSize",
",",
"as_number",
"=",
"False",
")",
":",
"result",
"=",
"0",
"if",
"byteSize",
"==",
"0",
":",
"raise",
"InvalidPlistException",
"(",
"\"Encountered integer with byte size of 0.\"",
")",
"# 1,... | Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise. | [
"Numbers",
"of",
"8",
"bytes",
"are",
"signed",
"integers",
"when",
"they",
"refer",
"to",
"numbers",
"but",
"unsigned",
"otherwise",
"."
] | 4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82 | https://github.com/wooster/biplist/blob/4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82/biplist/__init__.py#L499-L529 | train | 39,493 |
wooster/biplist | biplist/__init__.py | PlistWriter.writeOffsetTable | def writeOffsetTable(self, output):
"""Writes all of the object reference offsets."""
all_positions = []
writtenReferences = list(self.writtenReferences.items())
writtenReferences.sort(key=lambda x: x[1])
for obj,order in writtenReferences:
# Porting note: Elsewhere we deliberately replace empty unicdoe strings
# with empty binary strings, but the empty unicode string
# goes into writtenReferences. This isn't an issue in Py2
# because u'' and b'' have the same hash; but it is in
# Py3, where they don't.
if bytes != str and obj == unicodeEmpty:
obj = b''
position = self.referencePositions.get(obj)
if position is None:
raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
output += self.binaryInt(position, self.trailer.offsetSize)
all_positions.append(position)
return output | python | def writeOffsetTable(self, output):
"""Writes all of the object reference offsets."""
all_positions = []
writtenReferences = list(self.writtenReferences.items())
writtenReferences.sort(key=lambda x: x[1])
for obj,order in writtenReferences:
# Porting note: Elsewhere we deliberately replace empty unicdoe strings
# with empty binary strings, but the empty unicode string
# goes into writtenReferences. This isn't an issue in Py2
# because u'' and b'' have the same hash; but it is in
# Py3, where they don't.
if bytes != str and obj == unicodeEmpty:
obj = b''
position = self.referencePositions.get(obj)
if position is None:
raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
output += self.binaryInt(position, self.trailer.offsetSize)
all_positions.append(position)
return output | [
"def",
"writeOffsetTable",
"(",
"self",
",",
"output",
")",
":",
"all_positions",
"=",
"[",
"]",
"writtenReferences",
"=",
"list",
"(",
"self",
".",
"writtenReferences",
".",
"items",
"(",
")",
")",
"writtenReferences",
".",
"sort",
"(",
"key",
"=",
"lambd... | Writes all of the object reference offsets. | [
"Writes",
"all",
"of",
"the",
"object",
"reference",
"offsets",
"."
] | 4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82 | https://github.com/wooster/biplist/blob/4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82/biplist/__init__.py#L906-L924 | train | 39,494 |
wooster/biplist | biplist/__init__.py | PlistWriter.intSize | def intSize(self, obj):
"""Returns the number of bytes necessary to store the given integer."""
# SIGNED
if obj < 0: # Signed integer, always 8 bytes
return 8
# UNSIGNED
elif obj <= 0xFF: # 1 byte
return 1
elif obj <= 0xFFFF: # 2 bytes
return 2
elif obj <= 0xFFFFFFFF: # 4 bytes
return 4
# SIGNED
# 0x7FFFFFFFFFFFFFFF is the max.
elif obj <= 0x7FFFFFFFFFFFFFFF: # 8 bytes signed
return 8
elif obj <= 0xffffffffffffffff: # 8 bytes unsigned
return 16
else:
def intSize(self, obj):
    """Returns the number of bytes necessary to store the given integer."""
    # Negative values are always written as 8-byte signed integers.
    if obj < 0:
        return 8
    # Non-negative values take the smallest unsigned width that fits.
    for size, limit in ((1, 0xFF), (2, 0xFFFF), (4, 0xFFFFFFFF)):
        if obj <= limit:
            return size
    # Up to the signed 64-bit maximum still fits in 8 bytes.
    if obj <= 0x7FFFFFFFFFFFFFFF:
        return 8
    # Larger unsigned 64-bit values are stored as 16-byte integers,
    # matching Core Foundation's behavior.
    if obj <= 0xffffffffffffffff:
        return 16
    raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.")
"def",
"intSize",
"(",
"self",
",",
"obj",
")",
":",
"# SIGNED",
"if",
"obj",
"<",
"0",
":",
"# Signed integer, always 8 bytes",
"return",
"8",
"# UNSIGNED",
"elif",
"obj",
"<=",
"0xFF",
":",
"# 1 byte",
"return",
"1",
"elif",
"obj",
"<=",
"0xFFFF",
":",
... | Returns the number of bytes necessary to store the given integer. | [
"Returns",
"the",
"number",
"of",
"bytes",
"necessary",
"to",
"store",
"the",
"given",
"integer",
"."
] | 4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82 | https://github.com/wooster/biplist/blob/4c3d0c94132621188ca4c1fa9bda3b2d4d52ab82/biplist/__init__.py#L955-L974 | train | 39,495 |
ladybug-tools/uwg | uwg/RSMDef.py | RSMDef.load_z_meso | def load_z_meso(self,z_meso_path):
""" Open the z_meso.txt file and return heights as list """
self.z_meso = []
z_meso_file_path = os.path.join(z_meso_path, self.Z_MESO_FILE_NAME)
# Check if exists
if not os.path.exists(z_meso_file_path):
raise Exception("z_meso.txt file: '{}' does not exist.".format(uwg_param_file))
f = open(z_meso_file_path,'r')
for txtline in f:
z_ = float("".join(txtline.split())) # Strip all white spaces and change to float
self.z_meso.append(z_)
def load_z_meso(self, z_meso_path):
    """ Open the z_meso.txt file and return heights as list

    Reads one height per line into self.z_meso as floats; all whitespace
    inside a line is stripped before conversion.

    Args:
        z_meso_path: directory expected to contain the z_meso.txt file.

    Raises:
        Exception: if z_meso.txt does not exist in z_meso_path.
    """
    self.z_meso = []
    z_meso_file_path = os.path.join(z_meso_path, self.Z_MESO_FILE_NAME)
    # Check if exists
    if not os.path.exists(z_meso_file_path):
        # Bug fix: the original formatted this message with the undefined
        # name `uwg_param_file`, which raised NameError instead of the
        # intended Exception.
        raise Exception(
            "z_meso.txt file: '{}' does not exist.".format(z_meso_file_path))
    # Context manager ensures the file is closed even if float() raises.
    with open(z_meso_file_path, 'r') as f:
        for txtline in f:
            z_ = float("".join(txtline.split()))  # Strip all white spaces and change to float
            self.z_meso.append(z_)
"def",
"load_z_meso",
"(",
"self",
",",
"z_meso_path",
")",
":",
"self",
".",
"z_meso",
"=",
"[",
"]",
"z_meso_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"z_meso_path",
",",
"self",
".",
"Z_MESO_FILE_NAME",
")",
"# Check if exists",
"if",
"not",... | Open the z_meso.txt file and return heights as list | [
"Open",
"the",
"z_meso",
".",
"txt",
"file",
"and",
"return",
"heights",
"as",
"list"
] | fb71f656b3cb69e7ccf1d851dff862e14fa210fc | https://github.com/ladybug-tools/uwg/blob/fb71f656b3cb69e7ccf1d851dff862e14fa210fc/uwg/RSMDef.py#L140-L154 | train | 39,496 |
dcos/shakedown | shakedown/dcos/task.py | get_task | def get_task(task_id, completed=True):
""" Get a task by task id where a task_id is required.
:param task_id: task ID
:type task_id: str
:param completed: include completed tasks?
:type completed: bool
:return: a task
:rtype: obj
"""
tasks = get_tasks(task_id=task_id, completed=completed)
if len(tasks) == 0:
return None
assert len(tasks) == 1, 'get_task should return at max 1 task for a task id'
def get_task(task_id, completed=True):
    """ Get a task by task id where a task_id is required.

        :param task_id: task ID
        :type task_id: str
        :param completed: include completed tasks?
        :type completed: bool

        :return: the matching task, or None when nothing matches
        :rtype: obj
    """
    matching = get_tasks(task_id=task_id, completed=completed)
    count = len(matching)
    if count == 0:
        return None
    # A task id is expected to be unique across the cluster.
    assert count == 1, 'get_task should return at max 1 task for a task id'
    return matching[0]
"def",
"get_task",
"(",
"task_id",
",",
"completed",
"=",
"True",
")",
":",
"tasks",
"=",
"get_tasks",
"(",
"task_id",
"=",
"task_id",
",",
"completed",
"=",
"completed",
")",
"if",
"len",
"(",
"tasks",
")",
"==",
"0",
":",
"return",
"None",
"assert",
... | Get a task by task id where a task_id is required.
:param task_id: task ID
:type task_id: str
:param completed: include completed tasks?
:type completed: bool
:return: a task
:rtype: obj | [
"Get",
"a",
"task",
"by",
"task",
"id",
"where",
"a",
"task_id",
"is",
"required",
"."
] | e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e | https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/task.py#L32-L49 | train | 39,497 |
dcos/shakedown | shakedown/dcos/task.py | task_completed | def task_completed(task_id):
""" Check whether a task has completed.
:param task_id: task ID
:type task_id: str
:return: True if completed, False otherwise
:rtype: bool
"""
tasks = get_tasks(task_id=task_id)
completed_states = ('TASK_FINISHED',
'TASK_FAILED',
'TASK_KILLED',
'TASK_LOST',
'TASK_ERROR')
for task in tasks:
if task['state'] in completed_states:
return True
def task_completed(task_id):
    """ Check whether a task has completed.

        :param task_id: task ID
        :type task_id: str

        :return: True if the task reached a terminal state, False otherwise
        :rtype: bool
    """
    # Terminal Mesos task states.
    terminal_states = frozenset((
        'TASK_FINISHED',
        'TASK_FAILED',
        'TASK_KILLED',
        'TASK_LOST',
        'TASK_ERROR',
    ))
    return any(task['state'] in terminal_states
               for task in get_tasks(task_id=task_id))
"def",
"task_completed",
"(",
"task_id",
")",
":",
"tasks",
"=",
"get_tasks",
"(",
"task_id",
"=",
"task_id",
")",
"completed_states",
"=",
"(",
"'TASK_FINISHED'",
",",
"'TASK_FAILED'",
",",
"'TASK_KILLED'",
",",
"'TASK_LOST'",
",",
"'TASK_ERROR'",
")",
"for",
... | Check whether a task has completed.
:param task_id: task ID
:type task_id: str
:return: True if completed, False otherwise
:rtype: bool | [
"Check",
"whether",
"a",
"task",
"has",
"completed",
"."
] | e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e | https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/task.py#L59-L80 | train | 39,498 |
dcos/shakedown | shakedown/dcos/task.py | task_property_present_predicate | def task_property_present_predicate(service, task, prop):
""" True if the json_element passed is present for the task specified.
"""
try:
response = get_service_task(service, task)
except Exception as e:
pass
def task_property_present_predicate(service, task, prop):
    """ True if the json_element passed is present for the task specified.

    Looks up the task for the given service; a failed lookup is treated as
    the property being absent (best-effort check), not as an error.

    :param service: service name
    :param task: task name
    :param prop: property (JSON key) expected in the task description
    :return: True when the task exists and contains prop, False otherwise
    :rtype: bool
    """
    # Bug fix: the original left `response` unbound when get_service_task()
    # raised, so the return line below crashed with NameError instead of
    # returning False.
    response = None
    try:
        response = get_service_task(service, task)
    except Exception:
        # Best-effort: a failed lookup means the property is not present.
        pass
    return (response is not None) and (prop in response)
"def",
"task_property_present_predicate",
"(",
"service",
",",
"task",
",",
"prop",
")",
":",
"try",
":",
"response",
"=",
"get_service_task",
"(",
"service",
",",
"task",
")",
"except",
"Exception",
"as",
"e",
":",
"pass",
"return",
"(",
"response",
"is",
... | True if the json_element passed is present for the task specified. | [
"True",
"if",
"the",
"json_element",
"passed",
"is",
"present",
"for",
"the",
"task",
"specified",
"."
] | e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e | https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/task.py#L107-L115 | train | 39,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.